|
4 | 4 | "context" |
5 | 5 | "flag" |
6 | 6 | "fmt" |
| 7 | + "github.com/cortexproject/cortex/pkg/storegateway" |
7 | 8 | "hash/fnv" |
8 | 9 | "math/rand" |
9 | 10 | "os" |
@@ -209,6 +210,8 @@ type Config struct { |
209 | 210 | BlockVisitMarkerFileUpdateInterval time.Duration `yaml:"block_visit_marker_file_update_interval"` |
210 | 211 |
|
211 | 212 | AcceptMalformedIndex bool `yaml:"accept_malformed_index"` |
| 213 | + |
| 214 | + BucketIndexMetadataFetcherEnabled bool `yaml:"bucket_index_metadata_fetcher_enabled"` |
212 | 215 | } |
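The new BucketIndexMetadataFetcherEnabled option presumably also needs a CLI flag registered in RegisterFlags (the body of RegisterFlags is not shown in this hunk). A minimal sketch of that registration; the flag name, default and help text below are assumptions, not taken from this PR:

// Hypothetical flag registration for the new option; name and default are assumptions.
f.BoolVar(&cfg.BucketIndexMetadataFetcherEnabled, "compactor.bucket-index-metadata-fetcher-enabled", false,
	"If enabled, fetch per-tenant block metadata from the bucket index instead of scanning the bucket.")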
213 | 216 |
|
214 | 217 | // RegisterFlags registers the Compactor flags. |
@@ -320,6 +323,9 @@ type Compactor struct { |
320 | 323 | ringSubservices *services.Manager |
321 | 324 | ringSubservicesWatcher *services.FailureWatcher |
322 | 325 |
|
| 326 | + // Sharding strategy. |
| 327 | + shardingStrategy storegateway.ShardingStrategy |
| 328 | + |
323 | 329 | // Metrics. |
324 | 330 | compactionRunsStarted prometheus.Counter |
325 | 331 | compactionRunsInterrupted prometheus.Counter |
@@ -474,6 +480,30 @@ func newCompactor( |
474 | 480 | if len(compactorCfg.DisabledTenants) > 0 { |
475 | 481 | level.Info(c.logger).Log("msg", "compactor using disabled users", "disabled", strings.Join(compactorCfg.DisabledTenants, ", ")) |
476 | 482 | } |
| 483 | + var err error |
| 484 | + if c.compactorCfg.ShardingEnabled { |
| 485 | + lifecyclerCfg := c.compactorCfg.ShardingRing.ToLifecyclerConfig() |
| 486 | + c.ringLifecycler, err = ring.NewLifecycler(lifecyclerCfg, ring.NewNoopFlushTransferer(), "compactor", ringKey, true, false, c.logger, prometheus.WrapRegistererWithPrefix("cortex_", c.registerer)) |
| 487 | + if err != nil { |
| 488 | + return nil, errors.Wrap(err, "unable to initialize compactor ring lifecycler") |
| 489 | + } |
| 490 | + |
| 491 | + c.ring, err = ring.New(lifecyclerCfg.RingConfig, "compactor", ringKey, c.logger, prometheus.WrapRegistererWithPrefix("cortex_", c.registerer)) |
| 492 | + if err != nil { |
| 493 | + return nil, errors.Wrap(err, "unable to initialize compactor ring") |
| 494 | + } |
| 495 | + // Instantiate the right sharding strategy. |
| 496 | + switch c.compactorCfg.ShardingStrategy { |
| 497 | + case util.ShardingStrategyDefault: |
| 498 | + c.shardingStrategy = storegateway.NewDefaultShardingStrategy(c.ring, c.ringLifecycler.Addr, logger) |
| 499 | + case util.ShardingStrategyShuffle: |
| 500 | + c.shardingStrategy = storegateway.NewShuffleShardingStrategy(c.ring, lifecyclerCfg.ID, lifecyclerCfg.Addr, limits, logger, c.compactorCfg.ShardingRing.ZoneStableShuffleSharding) |
| 501 | + default: |
| 502 | + return nil, errInvalidShardingStrategy |
| 503 | + } |
| 504 | + } else { |
| 505 | + c.shardingStrategy = storegateway.NewNoShardingStrategy() |
| 506 | + } |
477 | 507 |
|
478 | 508 | c.Service = services.NewBasicService(c.starting, c.running, c.stopping) |
479 | 509 |
|
@@ -516,17 +546,6 @@ func (c *Compactor) starting(ctx context.Context) error { |
516 | 546 |
|
517 | 547 | // Initialize the compactors ring if sharding is enabled. |
518 | 548 | if c.compactorCfg.ShardingEnabled { |
519 | | - lifecyclerCfg := c.compactorCfg.ShardingRing.ToLifecyclerConfig() |
520 | | - c.ringLifecycler, err = ring.NewLifecycler(lifecyclerCfg, ring.NewNoopFlushTransferer(), "compactor", ringKey, true, false, c.logger, prometheus.WrapRegistererWithPrefix("cortex_", c.registerer)) |
521 | | - if err != nil { |
522 | | - return errors.Wrap(err, "unable to initialize compactor ring lifecycler") |
523 | | - } |
524 | | - |
525 | | - c.ring, err = ring.New(lifecyclerCfg.RingConfig, "compactor", ringKey, c.logger, prometheus.WrapRegistererWithPrefix("cortex_", c.registerer)) |
526 | | - if err != nil { |
527 | | - return errors.Wrap(err, "unable to initialize compactor ring") |
528 | | - } |
529 | | - |
530 | 549 | c.ringSubservices, err = services.NewManager(c.ringLifecycler, c.ring) |
531 | 550 | if err == nil { |
532 | 551 | c.ringSubservicesWatcher = services.NewFailureWatcher() |
@@ -570,7 +589,6 @@ func (c *Compactor) starting(ctx context.Context) error { |
570 | 589 | } |
571 | 590 | } |
572 | 591 | } |
573 | | - |
574 | 592 | // Ensure an initial cleanup occurred before starting the compactor. |
575 | 593 | if err := services.StartAndAwaitRunning(ctx, c.blocksCleaner); err != nil { |
576 | 594 | c.ringSubservices.StopAsync() |
@@ -789,28 +807,41 @@ func (c *Compactor) compactUser(ctx context.Context, userID string) error { |
789 | 807 | // Filters out blocks with no compaction marker; blocks can be marked as no compaction for reasons like |
790 | 808 | // out of order chunks or index file too big. |
791 | 809 | noCompactMarkerFilter := compact.NewGatherNoCompactionMarkFilter(ulogger, bucket, c.compactorCfg.MetaSyncConcurrency) |
792 | | - |
793 | | - fetcher, err := block.NewMetaFetcher( |
794 | | - ulogger, |
795 | | - c.compactorCfg.MetaSyncConcurrency, |
796 | | - bucket, |
797 | | - c.metaSyncDirForUser(userID), |
798 | | - reg, |
799 | | - // List of filters to apply (order matters). |
800 | | - []block.MetadataFilter{ |
801 | | - // Remove the ingester ID because we don't shard blocks anymore, while still |
802 | | - // honoring the shard ID if sharding was done in the past. |
803 | | - NewLabelRemoverFilter([]string{cortex_tsdb.IngesterIDExternalLabel}), |
804 | | - block.NewConsistencyDelayMetaFilter(ulogger, c.compactorCfg.ConsistencyDelay, reg), |
805 | | - ignoreDeletionMarkFilter, |
806 | | - deduplicateBlocksFilter, |
807 | | - noCompactMarkerFilter, |
808 | | - }, |
809 | | - ) |
810 | | - if err != nil { |
811 | | - return err |
| 810 | + var fetcher block.MetadataFetcher |
| 811 | + var err error |
| 812 | + filters := []block.MetadataFilter{ |
| 813 | + // Remove the ingester ID because we don't shard blocks anymore, while still |
| 814 | + // honoring the shard ID if sharding was done in the past. |
| 815 | + NewLabelRemoverFilter([]string{cortex_tsdb.IngesterIDExternalLabel}), |
| 816 | + block.NewConsistencyDelayMetaFilter(ulogger, c.compactorCfg.ConsistencyDelay, reg), |
| 817 | + ignoreDeletionMarkFilter, |
| 818 | + deduplicateBlocksFilter, |
| 819 | + noCompactMarkerFilter, |
| 820 | + } |
| 821 | + if c.compactorCfg.BucketIndexMetadataFetcherEnabled { |
| 822 | + fetcher = storegateway.NewBucketIndexMetadataFetcher( |
| 823 | + userID, |
| 824 | + bucket, |
| 825 | + c.shardingStrategy, |
| 826 | + c.limits, |
| 827 | + ulogger, |
| 828 | + reg, |
| 829 | + filters, |
| 830 | + ) |
| 831 | + } else { |
| 832 | + fetcher, err = block.NewMetaFetcher( |
| 833 | + ulogger, |
| 834 | + c.compactorCfg.MetaSyncConcurrency, |
| 835 | + bucket, |
| 836 | + c.metaSyncDirForUser(userID), |
| 837 | + reg, |
| 838 | + // List of filters to apply (order matters). |
| 839 | + filters, |
| 840 | + ) |
| 841 | + if err != nil { |
| 842 | + return err |
| 843 | + } |
812 | 844 | } |
813 | | - |
814 | 845 | syncer, err := compact.NewMetaSyncer( |
815 | 846 | ulogger, |
816 | 847 | reg, |
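Both branches produce a value satisfying Thanos' block.MetadataFetcher interface, which is what lets the syncer shown just above accept either implementation. A rough sketch of the consuming side, assuming the usual Thanos Fetch signature:

// Sketch only, not part of this diff: the syncer ultimately calls Fetch on whichever
// fetcher was built above; the return shape shown here is the usual Thanos one.
metas, partial, err := fetcher.Fetch(ctx)
if err != nil {
	return err
}
_ = metas   // block ULID -> metadata, already run through the configured filters
_ = partial // blocks whose meta.json could not be read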
|