Commit e16673f

Enforce integration tests default flags config to never be overwritten (#3370)
* Enforce integration tests default flags config to never be overwritten
  Signed-off-by: Marco Pracucci <[email protected]>
* Fixed integration test
  Signed-off-by: Marco Pracucci <[email protected]>
1 parent 40d8240 commit e16673f

15 files changed (+117 −102 lines)
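This commit converts the package-level flag maps in integration/configs.go into functions that return a fresh map on every call. Go maps are reference types: assigning a package-level map to a local variable only aliases it, so any test that added or overrode a flag silently rewrote the shared defaults for every test that ran afterwards in the same package. A minimal sketch of the before/after behavior (SharedFlags and FreshFlags are illustrative names, not identifiers from the commit):

    package main

    import "fmt"

    // Before: one package-level map shared by every test. Assigning it to a
    // local variable copies only the map header, so writes leak back.
    var SharedFlags = map[string]string{"-ingester.join-after": "0s"}

    // After: a constructor that allocates a fresh map per call, the pattern
    // this commit applies to ChunksStorageFlags, BlocksStorageFlags, etc.
    func FreshFlags() map[string]string {
        return map[string]string{"-ingester.join-after": "0s"}
    }

    func main() {
        a := SharedFlags
        a["-ingester.join-after"] = "10s"
        fmt.Println(SharedFlags["-ingester.join-after"]) // "10s": the shared default was overwritten

        b := FreshFlags()
        b["-ingester.join-after"] = "10s"
        fmt.Println(FreshFlags()["-ingester.join-after"]) // "0s": defaults stay pristine
    }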

integration/alertmanager_test.go

Lines changed: 6 additions & 7 deletions

@@ -25,8 +25,8 @@ func TestAlertmanager(t *testing.T) {
 	alertmanager := e2ecortex.NewAlertmanager(
 		"alertmanager",
 		mergeFlags(
-			AlertmanagerFlags,
-			AlertmanagerLocalFlags,
+			AlertmanagerFlags(),
+			AlertmanagerLocalFlags(),
 		),
 		"",
 	)
@@ -56,15 +56,14 @@ func TestAlertmanagerStoreAPI(t *testing.T) {
 	require.NoError(t, err)
 	defer s.Close()
 
-	minio := e2edb.NewMinio(9000, AlertmanagerS3Flags["-alertmanager.storage.s3.buckets"])
+	flags := mergeFlags(AlertmanagerFlags(), AlertmanagerS3Flags())
+
+	minio := e2edb.NewMinio(9000, flags["-alertmanager.storage.s3.buckets"])
 	require.NoError(t, s.StartAndWaitReady(minio))
 
 	am := e2ecortex.NewAlertmanager(
 		"alertmanager",
-		mergeFlags(
-			AlertmanagerFlags,
-			AlertmanagerS3Flags,
-		),
+		flags,
 		"",
 	)
 

integration/backward_compatibility_test.go

Lines changed: 11 additions & 9 deletions

@@ -41,7 +41,7 @@ func preCortex14Flags(flags map[string]string) map[string]string {
 func TestBackwardCompatibilityWithChunksStorage(t *testing.T) {
 	for previousImage, flagsFn := range previousVersionImages {
 		t.Run(fmt.Sprintf("Backward compatibility upgrading from %s", previousImage), func(t *testing.T) {
-			flags := ChunksStorageFlags
+			flags := ChunksStorageFlags()
 			if flagsFn != nil {
 				flags = flagsFn(flags)
 			}
@@ -54,7 +54,7 @@ func TestBackwardCompatibilityWithChunksStorage(t *testing.T) {
 func TestNewDistributorsCanPushToOldIngestersWithReplication(t *testing.T) {
 	for previousImage, flagsFn := range previousVersionImages {
 		t.Run(fmt.Sprintf("Backward compatibility upgrading from %s", previousImage), func(t *testing.T) {
-			flags := ChunksStorageFlags
+			flags := ChunksStorageFlags()
 			if flagsFn != nil {
 				flags = flagsFn(flags)
 			}
@@ -78,7 +78,7 @@ func runBackwardCompatibilityTestWithChunksStorage(t *testing.T, previousImage s
 
 	// Start Cortex table-manager (running on current version since the backward compatibility
 	// test is about testing a rolling update of other services).
-	tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags, "")
+	tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags(), "")
 	require.NoError(t, s.StartAndWaitReady(tableManager))
 
 	// Wait until the first table-manager sync has completed, so that we're
@@ -87,7 +87,7 @@ func runBackwardCompatibilityTestWithChunksStorage(t *testing.T, previousImage s
 
 	// Start other Cortex components (ingester running on previous version).
 	ingester1 := e2ecortex.NewIngester("ingester-1", consul.NetworkHTTPEndpoint(), flagsForOldImage, previousImage)
-	distributor := e2ecortex.NewDistributor("distributor", consul.NetworkHTTPEndpoint(), ChunksStorageFlags, "")
+	distributor := e2ecortex.NewDistributor("distributor", consul.NetworkHTTPEndpoint(), ChunksStorageFlags(), "")
 	require.NoError(t, s.StartAndWaitReady(distributor, ingester1))
 
 	// Wait until the distributor has updated the ring.
@@ -104,7 +104,7 @@ func runBackwardCompatibilityTestWithChunksStorage(t *testing.T, previousImage s
 	require.NoError(t, err)
 	require.Equal(t, 200, res.StatusCode)
 
-	ingester2 := e2ecortex.NewIngester("ingester-2", consul.NetworkHTTPEndpoint(), mergeFlags(ChunksStorageFlags, map[string]string{
+	ingester2 := e2ecortex.NewIngester("ingester-2", consul.NetworkHTTPEndpoint(), mergeFlags(ChunksStorageFlags(), map[string]string{
 		"-ingester.join-after": "10s",
 	}), "")
 	// Start ingester-2 on new version, to ensure the transfer is backward compatible.
@@ -117,7 +117,8 @@ func runBackwardCompatibilityTestWithChunksStorage(t *testing.T, previousImage s
 	checkQueries(t, consul, distributor,
 		expectedVector,
 		previousImage,
-		flagsForOldImage, ChunksStorageFlags,
+		flagsForOldImage,
+		ChunksStorageFlags(),
 		now,
 		s,
 		1,
@@ -135,15 +136,15 @@ func runNewDistributorsCanPushToOldIngestersWithReplication(t *testing.T, previo
 	consul := e2edb.NewConsul()
 	require.NoError(t, s.StartAndWaitReady(dynamo, consul))
 
-	flagsForNewImage := mergeFlags(ChunksStorageFlags, map[string]string{
+	flagsForNewImage := mergeFlags(ChunksStorageFlags(), map[string]string{
 		"-distributor.replication-factor": "3",
 	})
 
 	require.NoError(t, writeFileToSharedDir(s, cortexSchemaConfigFile, []byte(cortexSchemaConfigYaml)))
 
 	// Start Cortex table-manager (running on current version since the backward compatibility
 	// test is about testing a rolling update of other services).
-	tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags, "")
+	tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags(), "")
 	require.NoError(t, s.StartAndWaitReady(tableManager))
 
 	// Wait until the first table-manager sync has completed, so that we're
@@ -174,7 +175,8 @@ func runNewDistributorsCanPushToOldIngestersWithReplication(t *testing.T, previo
 	checkQueries(t, consul, distributor,
 		expectedVector,
 		previousImage,
-		flagsForPreviousImage, flagsForNewImage,
+		flagsForPreviousImage,
+		flagsForNewImage,
 		now,
 		s,
 		3,
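Each entry in previousVersionImages may carry a flagsFn (such as preCortex14Flags in the hunk header above) that adapts the default flags to an older Cortex version. Now that ChunksStorageFlags() hands every subtest its own map, a transformer can copy or mutate its input without contaminating the next subtest. A sketch of a copy-based transformer in that spirit (renameFlag is a hypothetical helper, not code from this commit):

    // renameFlag returns a copy of flags with a single key renamed,
    // leaving the input map untouched — the safe shape for a
    // flagsFn-style transformer.
    func renameFlag(flags map[string]string, oldName, newName string) map[string]string {
        out := make(map[string]string, len(flags))
        for k, v := range flags {
            if k == oldName {
                k = newName
            }
            out[k] = v
        }
        return out
    }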

integration/chunks_delete_series_test.go

Lines changed: 1 addition & 1 deletion

@@ -43,7 +43,7 @@ func TestDeleteSeriesAllIndexBackends(t *testing.T) {
 		storeConfigs[i] = storeConfig{From: oldestStoreStartTime.Add(time.Duration(i) * perStoreDuration).UTC().Format("2006-01-02"), IndexStore: store}
 	}
 
-	flags := mergeFlags(ChunksStorageFlags, map[string]string{
+	flags := mergeFlags(ChunksStorageFlags(), map[string]string{
 		"-cassandra.addresses":          cassandra.NetworkHTTPEndpoint(),
 		"-cassandra.keyspace":           "tests", // keyspace gets created on startup if it does not exist
 		"-cassandra.replication-factor": "1",

integration/chunks_storage_backends_test.go

Lines changed: 1 addition & 1 deletion

@@ -60,7 +60,7 @@ func TestChunksStorageAllIndexBackends(t *testing.T) {
 		storeConfigs[i] = storeConfig{From: oldestStoreStartTime.Add(time.Duration(i) * perStoreDuration).Format("2006-01-02"), IndexStore: store}
 	}
 
-	storageFlags := mergeFlags(ChunksStorageFlags, map[string]string{
+	storageFlags := mergeFlags(ChunksStorageFlags(), map[string]string{
 		"-cassandra.addresses":          cassandra.NetworkHTTPEndpoint(),
 		"-cassandra.keyspace":           "tests", // keyspace gets created on startup if it does not exist
 		"-cassandra.replication-factor": "1",

integration/configs.go

Lines changed: 49 additions & 37 deletions

@@ -90,46 +90,56 @@ receivers:
 var (
 	cortexSchemaConfigYaml = buildSchemaConfigWith([]storeConfig{{From: "2019-03-20", IndexStore: "aws-dynamo"}})
 
-	AlertmanagerFlags = map[string]string{
-		"-alertmanager.configs.poll-interval": "1s",
-		"-alertmanager.web.external-url":      "http://localhost/api/prom",
+	AlertmanagerFlags = func() map[string]string {
+		return map[string]string{
+			"-alertmanager.configs.poll-interval": "1s",
+			"-alertmanager.web.external-url":      "http://localhost/api/prom",
+		}
 	}
 
-	AlertmanagerLocalFlags = map[string]string{
-		"-alertmanager.storage.type":       "local",
-		"-alertmanager.storage.local.path": filepath.Join(e2e.ContainerSharedDir, "alertmanager_configs"),
+	AlertmanagerLocalFlags = func() map[string]string {
+		return map[string]string{
+			"-alertmanager.storage.type":       "local",
+			"-alertmanager.storage.local.path": filepath.Join(e2e.ContainerSharedDir, "alertmanager_configs"),
+		}
 	}
 
-	AlertmanagerS3Flags = map[string]string{
-		"-alertmanager.storage.type":                "s3",
-		"-alertmanager.storage.s3.buckets":          "cortex-alerts",
-		"-alertmanager.storage.s3.force-path-style": "true",
-		"-alertmanager.storage.s3.url":              fmt.Sprintf("s3://%s:%s@%s-minio-9000.:9000", e2edb.MinioAccessKey, e2edb.MinioSecretKey, networkName),
+	AlertmanagerS3Flags = func() map[string]string {
+		return map[string]string{
+			"-alertmanager.storage.type":                "s3",
+			"-alertmanager.storage.s3.buckets":          "cortex-alerts",
+			"-alertmanager.storage.s3.force-path-style": "true",
+			"-alertmanager.storage.s3.url":              fmt.Sprintf("s3://%s:%s@%s-minio-9000.:9000", e2edb.MinioAccessKey, e2edb.MinioSecretKey, networkName),
+		}
 	}
 
-	RulerConfigs = map[string]string{
-		"-ruler.enable-sharding":             "false",
-		"-ruler.poll-interval":               "2s",
-		"-experimental.ruler.enable-api":     "true",
-		"-ruler.storage.type":                "s3",
-		"-ruler.storage.s3.buckets":          "cortex-rules",
-		"-ruler.storage.s3.force-path-style": "true",
-		"-ruler.storage.s3.url":              fmt.Sprintf("s3://%s:%s@%s-minio-9000.:9000", e2edb.MinioAccessKey, e2edb.MinioSecretKey, networkName),
+	RulerFlags = func() map[string]string {
+		return map[string]string{
+			"-ruler.enable-sharding":             "false",
+			"-ruler.poll-interval":               "2s",
+			"-experimental.ruler.enable-api":     "true",
+			"-ruler.storage.type":                "s3",
+			"-ruler.storage.s3.buckets":          "cortex-rules",
+			"-ruler.storage.s3.force-path-style": "true",
+			"-ruler.storage.s3.url":              fmt.Sprintf("s3://%s:%s@%s-minio-9000.:9000", e2edb.MinioAccessKey, e2edb.MinioSecretKey, networkName),
+		}
 	}
 
-	BlocksStorageFlags = map[string]string{
-		"-store.engine":                                 blocksStorageEngine,
-		"-blocks-storage.backend":                       "s3",
-		"-blocks-storage.tsdb.block-ranges-period":      "1m",
-		"-blocks-storage.bucket-store.sync-interval":    "5s",
-		"-blocks-storage.tsdb.retention-period":         "5m",
-		"-blocks-storage.tsdb.ship-interval":            "1m",
-		"-blocks-storage.tsdb.head-compaction-interval": "1s",
-		"-blocks-storage.s3.access-key-id":              e2edb.MinioAccessKey,
-		"-blocks-storage.s3.secret-access-key":          e2edb.MinioSecretKey,
-		"-blocks-storage.s3.bucket-name":                bucketName,
-		"-blocks-storage.s3.endpoint":                   fmt.Sprintf("%s-minio-9000:9000", networkName),
-		"-blocks-storage.s3.insecure":                   "true",
+	BlocksStorageFlags = func() map[string]string {
+		return map[string]string{
+			"-store.engine":                                 blocksStorageEngine,
+			"-blocks-storage.backend":                       "s3",
+			"-blocks-storage.tsdb.block-ranges-period":      "1m",
+			"-blocks-storage.bucket-store.sync-interval":    "5s",
+			"-blocks-storage.tsdb.retention-period":         "5m",
+			"-blocks-storage.tsdb.ship-interval":            "1m",
+			"-blocks-storage.tsdb.head-compaction-interval": "1s",
+			"-blocks-storage.s3.access-key-id":              e2edb.MinioAccessKey,
+			"-blocks-storage.s3.secret-access-key":          e2edb.MinioSecretKey,
+			"-blocks-storage.s3.bucket-name":                bucketName,
+			"-blocks-storage.s3.endpoint":                   fmt.Sprintf("%s-minio-9000:9000", networkName),
+			"-blocks-storage.s3.insecure":                   "true",
+		}
 	}
 
 	BlocksStorageConfig = buildConfigFromTemplate(`
@@ -163,11 +173,13 @@ blocks_storage:
 		MinioEndpoint: fmt.Sprintf("%s-minio-9000:9000", networkName),
 	})
 
-	ChunksStorageFlags = map[string]string{
-		"-dynamodb.url":                   fmt.Sprintf("dynamodb://u:p@%s-dynamodb.:8000", networkName),
-		"-table-manager.poll-interval":    "1m",
-		"-schema-config-file":             filepath.Join(e2e.ContainerSharedDir, cortexSchemaConfigFile),
-		"-table-manager.retention-period": "168h",
+	ChunksStorageFlags = func() map[string]string {
+		return map[string]string{
+			"-dynamodb.url":                   fmt.Sprintf("dynamodb://u:p@%s-dynamodb.:8000", networkName),
+			"-table-manager.poll-interval":    "1m",
+			"-schema-config-file":             filepath.Join(e2e.ContainerSharedDir, cortexSchemaConfigFile),
+			"-table-manager.retention-period": "168h",
+		}
 	}
 
 	ChunksStorageConfig = buildConfigFromTemplate(`
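These defaults are combined with per-test overrides via mergeFlags throughout the diff. Its implementation is not shown here, but the call sites rely on two properties: later maps win on conflicting keys, and the result is a newly allocated map, so merging never mutates the defaults. A sketch under those assumptions:

    // mergeFlags as assumed by the call sites in this diff: returns a fresh
    // map, with later arguments overriding earlier ones on key conflicts.
    func mergeFlags(flagMaps ...map[string]string) map[string]string {
        merged := map[string]string{}
        for _, flags := range flagMaps {
            for k, v := range flags {
                merged[k] = v
            }
        }
        return merged
    }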

integration/ingester_flush_test.go

Lines changed: 4 additions & 4 deletions

@@ -32,12 +32,12 @@ func TestIngesterFlushWithChunksStorage(t *testing.T) {
 	// Start Cortex components.
 	require.NoError(t, writeFileToSharedDir(s, cortexSchemaConfigFile, []byte(cortexSchemaConfigYaml)))
 
-	tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags, "")
-	ingester := e2ecortex.NewIngester("ingester", consul.NetworkHTTPEndpoint(), mergeFlags(ChunksStorageFlags, map[string]string{
+	tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags(), "")
+	ingester := e2ecortex.NewIngester("ingester", consul.NetworkHTTPEndpoint(), mergeFlags(ChunksStorageFlags(), map[string]string{
 		"-ingester.max-transfer-retries": "0",
 	}), "")
-	querier := e2ecortex.NewQuerier("querier", consul.NetworkHTTPEndpoint(), ChunksStorageFlags, "")
-	distributor := e2ecortex.NewDistributor("distributor", consul.NetworkHTTPEndpoint(), ChunksStorageFlags, "")
+	querier := e2ecortex.NewQuerier("querier", consul.NetworkHTTPEndpoint(), ChunksStorageFlags(), "")
+	distributor := e2ecortex.NewDistributor("distributor", consul.NetworkHTTPEndpoint(), ChunksStorageFlags(), "")
 	require.NoError(t, s.StartAndWaitReady(distributor, querier, ingester, tableManager))
 
 	// Wait until the first table-manager sync has completed, so that we're

integration/ingester_hand_over_test.go

Lines changed: 2 additions & 2 deletions

@@ -17,13 +17,13 @@ import (
 )
 
 func TestIngesterHandOverWithChunksStorage(t *testing.T) {
-	runIngesterHandOverTest(t, ChunksStorageFlags, func(t *testing.T, s *e2e.Scenario) {
+	runIngesterHandOverTest(t, ChunksStorageFlags(), func(t *testing.T, s *e2e.Scenario) {
 		dynamo := e2edb.NewDynamoDB()
 		require.NoError(t, s.StartAndWaitReady(dynamo))
 
 		require.NoError(t, writeFileToSharedDir(s, cortexSchemaConfigFile, []byte(cortexSchemaConfigYaml)))
 
-		tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags, "")
+		tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags(), "")
 		require.NoError(t, s.StartAndWaitReady(tableManager))
 
 		// Wait until the first table-manager sync has completed, so that we're

integration/ingester_sharding_test.go

Lines changed: 1 addition & 1 deletion

@@ -44,7 +44,7 @@ func TestIngesterSharding(t *testing.T) {
 	require.NoError(t, err)
 	defer s.Close()
 
-	flags := BlocksStorageFlags
+	flags := BlocksStorageFlags()
 	flags["-distributor.shard-by-all-labels"] = "true"
 	flags["-distributor.sharding-strategy"] = testData.shardingStrategy
 	flags["-distributor.ingestion-tenant-shard-size"] = strconv.Itoa(testData.tenantShardSize)

integration/integration_memberlist_single_binary_test.go

Lines changed: 1 addition & 1 deletion

@@ -77,7 +77,7 @@ func newSingleBinary(name string, join string) *e2ecortex.CortexService {
 
 	serv := e2ecortex.NewSingleBinary(
 		name,
-		mergeFlags(ChunksStorageFlags, flags),
+		mergeFlags(ChunksStorageFlags(), flags),
 		"",
 		8000,
 	)

integration/querier_remote_read_test.go

Lines changed: 3 additions & 3 deletions

@@ -29,15 +29,15 @@ func TestQuerierRemoteRead(t *testing.T) {
 	defer s.Close()
 
 	require.NoError(t, writeFileToSharedDir(s, cortexSchemaConfigFile, []byte(cortexSchemaConfigYaml)))
-	flags := mergeFlags(ChunksStorageFlags, map[string]string{})
+	flags := mergeFlags(ChunksStorageFlags(), map[string]string{})
 
 	// Start dependencies.
 	dynamo := e2edb.NewDynamoDB()
 
 	consul := e2edb.NewConsul()
 	require.NoError(t, s.StartAndWaitReady(consul, dynamo))
 
-	tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags, "")
+	tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags(), "")
 	require.NoError(t, s.StartAndWaitReady(tableManager))
 
 	// Wait until the first table-manager sync has completed, so that we're
@@ -63,7 +63,7 @@ func TestQuerierRemoteRead(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, 200, res.StatusCode)
 
-	querier := e2ecortex.NewQuerier("querier", consul.NetworkHTTPEndpoint(), ChunksStorageFlags, "")
+	querier := e2ecortex.NewQuerier("querier", consul.NetworkHTTPEndpoint(), ChunksStorageFlags(), "")
 	require.NoError(t, s.StartAndWaitReady(querier))
 
 	// Wait until the querier has updated the ring.
