18 changes: 9 additions & 9 deletions 01_deployment.tf
@@ -10,7 +10,7 @@ resource "kubernetes_deployment_v1" "deployment" {
}

spec {
replicas = var.podResourceTypeConfig.minReplicas
replicas = var.infraOverrideConfig.replicas.min == null ? var.podResourceTypeConfig.minReplicas : var.infraOverrideConfig.replicas.min

strategy {
type = local.pvcEnabled ? "Recreate" : "RollingUpdate"
@@ -34,9 +34,7 @@ resource "kubernetes_deployment_v1" "deployment" {

template {
metadata {
labels = merge(var.consistency.soft.labels, {
hash = sha1(base64encode(join("", concat(local.configVolumeHashData, local.configEnvHashData, local.secretVolumeHashData, local.secretEnvHashData, local.customCommandsHashData))))
})
labels = local.templateLabels
annotations = var.podResourceTypeConfig.podAnnotations
}

@@ -86,7 +84,7 @@ resource "kubernetes_deployment_v1" "deployment" {
topology_key = topology_spread_constraint.value.topologyKey
when_unsatisfiable = topology_spread_constraint.value.whenUnsatisfiable
label_selector {
match_labels = var.consistency.soft.matchLabels
match_labels = local.templateLabels
}
}
}
@@ -384,8 +382,8 @@ resource "kubernetes_deployment_v1" "deployment" {
}

resources {
requests = init_container.value.resources[local.infrastructureSize].requests
limits = init_container.value.resources[local.infrastructureSize].limits
requests = lookup(var.infraOverrideConfig.resources, init_container.key, lookup(init_container.value.resources, var.infrastructureSize, local.fallbackResources)).requests
limits = lookup(var.infraOverrideConfig.resources, init_container.key, lookup(init_container.value.resources, var.infrastructureSize, local.fallbackResources)).limits
}

dynamic "volume_mount" {
@@ -461,6 +459,7 @@ resource "kubernetes_deployment_v1" "deployment" {
mount_path = volume_mount.value.path
mount_propagation = volume_mount.value.propagation
name = volume_mount.key
read_only = volume_mount.value.readOnly
}
}

@@ -785,8 +784,8 @@ resource "kubernetes_deployment_v1" "deployment" {
}

resources {
requests = { for k, v in container.value.resources[local.infrastructureSize].requests : k => v == null ? null : "${regex(local.resourceMultiplierRegex, v)[0] * local.resourceMultiplier}${regex(local.resourceMultiplierRegex, v)[1]}" }
limits = { for k, v in container.value.resources[local.infrastructureSize].limits : k => v == null ? null : "${regex(local.resourceMultiplierRegex, v)[0] * local.resourceMultiplier}${regex(local.resourceMultiplierRegex, v)[1]}" }
requests = lookup(var.infraOverrideConfig.resources, container.key, lookup(container.value.resources, var.infrastructureSize, local.fallbackResources)).requests
limits = lookup(var.infraOverrideConfig.resources, container.key, lookup(container.value.resources, var.infrastructureSize, local.fallbackResources)).limits
}

dynamic "volume_mount" {
@@ -862,6 +861,7 @@ resource "kubernetes_deployment_v1" "deployment" {
mount_path = volume_mount.value.path
mount_propagation = volume_mount.value.propagation
name = volume_mount.key
read_only = volume_mount.value.readOnly
}
}

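The deployment hunks above lean on three symbols whose definitions sit outside this diff: var.infraOverrideConfig, local.templateLabels, and local.fallbackResources (the same symbols recur in the daemonset, statefulset, cronJob, and job files below). local.templateLabels presumably captures the merge-plus-hash expression that the template metadata previously built inline; the other two shapes below are assumptions sketched only to make the references readable, not the module's actual definitions.

variable "infraOverrideConfig" {
  description = "Per-environment infrastructure overrides; null or missing values fall back to podResourceTypeConfig and the size-based defaults."
  type = object({
    replicas = optional(object({
      min = optional(number, null)
    }), {})
    resources = optional(map(object({
      requests = map(string)
      limits   = map(string)
    })), {})
  })
  default = {}
}

locals {
  # Shared pod-template labels plus a hash that forces a rollout whenever
  # mounted config, env, secrets, or custom commands change; this mirrors the
  # inline expression removed from the template metadata above.
  templateLabels = merge(var.consistency.soft.labels, {
    hash = sha1(base64encode(join("", concat(
      local.configVolumeHashData,
      local.configEnvHashData,
      local.secretVolumeHashData,
      local.secretEnvHashData,
      local.customCommandsHashData,
    ))))
  })

  # Last-resort requests/limits when a container has no entry for the selected
  # infrastructure size; the values here are purely illustrative.
  fallbackResources = {
    requests = { cpu = "100m", memory = "128Mi" }
    limits   = { cpu = "500m", memory = "256Mi" }
  }
}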
16 changes: 8 additions & 8 deletions 02_daemonset.tf
@@ -27,9 +27,7 @@ resource "kubernetes_daemon_set_v1" "daemonset" {

template {
metadata {
labels = merge(var.consistency.soft.labels, {
hash = sha1(base64encode(join("", concat(local.configVolumeHashData, local.configEnvHashData, local.secretVolumeHashData, local.secretEnvHashData, local.customCommandsHashData))))
})
labels = local.templateLabels
annotations = var.podResourceTypeConfig.podAnnotations
}

@@ -79,7 +77,7 @@ resource "kubernetes_daemon_set_v1" "daemonset" {
topology_key = topology_spread_constraint.value.topologyKey
when_unsatisfiable = topology_spread_constraint.value.whenUnsatisfiable
label_selector {
match_labels = var.consistency.soft.matchLabels
match_labels = local.templateLabels
}
}
}
@@ -367,8 +365,8 @@ resource "kubernetes_daemon_set_v1" "daemonset" {
}

resources {
requests = init_container.value.resources[local.infrastructureSize].requests
limits = init_container.value.resources[local.infrastructureSize].limits
requests = lookup(var.infraOverrideConfig.resources, init_container.key, lookup(init_container.value.resources, var.infrastructureSize, local.fallbackResources)).requests
limits = lookup(var.infraOverrideConfig.resources, init_container.key, lookup(init_container.value.resources, var.infrastructureSize, local.fallbackResources)).limits
}

dynamic "volume_mount" {
@@ -435,6 +433,7 @@ resource "kubernetes_daemon_set_v1" "daemonset" {
mount_path = volume_mount.value.path
mount_propagation = volume_mount.value.propagation
name = volume_mount.key
read_only = volume_mount.value.readOnly
}
}

@@ -760,8 +759,8 @@ resource "kubernetes_daemon_set_v1" "daemonset" {
}

resources {
requests = { for k, v in container.value.resources[local.infrastructureSize].requests : k => v == null ? null : "${regex(local.resourceMultiplierRegex, v)[0] * local.resourceMultiplier}${regex(local.resourceMultiplierRegex, v)[1]}" }
limits = { for k, v in container.value.resources[local.infrastructureSize].limits : k => v == null ? null : "${regex(local.resourceMultiplierRegex, v)[0] * local.resourceMultiplier}${regex(local.resourceMultiplierRegex, v)[1]}" }
requests = lookup(var.infraOverrideConfig.resources, container.key, lookup(container.value.resources, var.infrastructureSize, local.fallbackResources)).requests
limits = lookup(var.infraOverrideConfig.resources, container.key, lookup(container.value.resources, var.infrastructureSize, local.fallbackResources)).limits
}

dynamic "volume_mount" {
@@ -828,6 +827,7 @@ resource "kubernetes_daemon_set_v1" "daemonset" {
mount_path = volume_mount.value.path
mount_propagation = volume_mount.value.propagation
name = volume_mount.key
read_only = volume_mount.value.readOnly
}
}

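The new resources expression resolves in three steps: a per-container entry in var.infraOverrideConfig.resources wins, otherwise the container's own table is indexed by var.infrastructureSize, and local.fallbackResources is the last resort. It also replaces the previous regex-based resourceMultiplier scaling, so the selected requests and limits are now applied verbatim. A hypothetical set of inputs showing the override level taking precedence for one container; the container key "app" and the size name are made up for illustration:

# Hypothetical module inputs; only the override path is exercised here.
infrastructureSize = "medium"

infraOverrideConfig = {
  resources = {
    # The container keyed "app" gets these values regardless of its
    # size-based table; containers without an entry keep the
    # var.infrastructureSize lookup, or local.fallbackResources if
    # that size is missing too.
    app = {
      requests = { cpu = "250m", memory = "512Mi" }
      limits   = { cpu = "1",    memory = "1Gi" }
    }
  }
}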
23 changes: 14 additions & 9 deletions 03_statefulset.tf
@@ -11,7 +11,7 @@ resource "kubernetes_stateful_set_v1" "statefulset" {

spec {
service_name = kubernetes_service_v1.clusterIp.0.metadata.0.name
replicas = var.podResourceTypeConfig.minReplicas
replicas = var.infraOverrideConfig.replicas.min == null ? var.podResourceTypeConfig.minReplicas : var.infraOverrideConfig.replicas.min
pod_management_policy = var.podResourceTypeConfig.podManagementPolicy

selector {
@@ -39,9 +39,7 @@ resource "kubernetes_stateful_set_v1" "statefulset" {

template {
metadata {
labels = merge(var.consistency.soft.labels, {
hash = sha1(base64encode(join("", concat(local.configVolumeHashData, local.configEnvHashData, local.secretVolumeHashData, local.secretEnvHashData, local.customCommandsHashData))))
})
labels = local.templateLabels
annotations = var.podResourceTypeConfig.podAnnotations
}

@@ -91,7 +89,7 @@ resource "kubernetes_stateful_set_v1" "statefulset" {
topology_key = topology_spread_constraint.value.topologyKey
when_unsatisfiable = topology_spread_constraint.value.whenUnsatisfiable
label_selector {
match_labels = var.consistency.soft.matchLabels
match_labels = local.templateLabels
}
}
}
@@ -389,8 +387,8 @@ resource "kubernetes_stateful_set_v1" "statefulset" {
}

resources {
requests = init_container.value.resources[local.infrastructureSize].requests
limits = init_container.value.resources[local.infrastructureSize].limits
requests = lookup(var.infraOverrideConfig.resources, init_container.key, lookup(init_container.value.resources, var.infrastructureSize, local.fallbackResources)).requests
limits = lookup(var.infraOverrideConfig.resources, init_container.key, lookup(init_container.value.resources, var.infrastructureSize, local.fallbackResources)).limits
}

dynamic "volume_mount" {
@@ -466,6 +464,7 @@ resource "kubernetes_stateful_set_v1" "statefulset" {
mount_path = volume_mount.value.path
mount_propagation = volume_mount.value.propagation
name = volume_mount.key
read_only = volume_mount.value.readOnly
}
}

@@ -790,8 +789,8 @@ resource "kubernetes_stateful_set_v1" "statefulset" {
}

resources {
requests = { for k, v in container.value.resources[local.infrastructureSize].requests : k => v == null ? null : "${regex(local.resourceMultiplierRegex, v)[0] * local.resourceMultiplier}${regex(local.resourceMultiplierRegex, v)[1]}" }
limits = { for k, v in container.value.resources[local.infrastructureSize].limits : k => v == null ? null : "${regex(local.resourceMultiplierRegex, v)[0] * local.resourceMultiplier}${regex(local.resourceMultiplierRegex, v)[1]}" }
requests = lookup(var.infraOverrideConfig.resources, container.key, lookup(container.value.resources, var.infrastructureSize, local.fallbackResources)).requests
limits = lookup(var.infraOverrideConfig.resources, container.key, lookup(container.value.resources, var.infrastructureSize, local.fallbackResources)).limits
}

dynamic "volume_mount" {
Expand Down Expand Up @@ -867,6 +866,7 @@ resource "kubernetes_stateful_set_v1" "statefulset" {
mount_path = volume_mount.value.path
mount_propagation = volume_mount.value.propagation
name = volume_mount.key
read_only = volume_mount.value.readOnly
}
}

@@ -883,4 +883,9 @@ resource "kubernetes_stateful_set_v1" "statefulset" {
}
}
}
lifecycle {
ignore_changes = [
spec.0.volume_claim_template.0.metadata.0.labels,
]
}
}
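The lifecycle block added to the StatefulSet tells Terraform to ignore label drift on the first volume claim template. Kubernetes rejects updates to volumeClaimTemplates on an existing StatefulSet, so a label-only change (for example the rollout hash now carried in local.templateLabels) could otherwise produce a plan that cannot apply. A minimal sketch of the kind of claim template that rule points at, assuming the module attaches its shared labels there; the name and values are illustrative:

volume_claim_template {
  metadata {
    name   = "data"
    labels = local.templateLabels   # drift here is ignored by the lifecycle rule above
  }
  spec {
    access_modes = ["ReadWriteOnce"]
    resources {
      requests = { storage = "10Gi" }
    }
  }
}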
18 changes: 9 additions & 9 deletions 04_cronJob.tf
@@ -24,16 +24,14 @@ resource "kubernetes_cron_job_v1" "cronJob" {
annotations = var.podResourceTypeConfig.annotations
}
spec {
parallelism = var.podResourceTypeConfig.minReplicas
parallelism = var.infraOverrideConfig.replicas.min == null ? var.podResourceTypeConfig.minReplicas : var.infraOverrideConfig.replicas.min
backoff_limit = var.podResourceTypeConfig.backoffLimit
ttl_seconds_after_finished = var.podResourceTypeConfig.ttlSecondsAfterFinished
completions = var.podResourceTypeConfig.completions

template {
metadata {
labels = merge(var.consistency.soft.labels, {
hash = sha1(base64encode(join("", concat(local.configVolumeHashData, local.configEnvHashData, local.secretVolumeHashData, local.secretEnvHashData, local.customCommandsHashData))))
})
labels = local.templateLabels
annotations = var.podResourceTypeConfig.podAnnotations
}

@@ -84,7 +82,7 @@ resource "kubernetes_cron_job_v1" "cronJob" {
topology_key = topology_spread_constraint.value.topologyKey
when_unsatisfiable = topology_spread_constraint.value.whenUnsatisfiable
label_selector {
match_labels = var.consistency.soft.matchLabels
match_labels = local.templateLabels
}
}
}
@@ -382,8 +380,8 @@ resource "kubernetes_cron_job_v1" "cronJob" {
}

resources {
requests = init_container.value.resources[local.infrastructureSize].requests
limits = init_container.value.resources[local.infrastructureSize].limits
requests = lookup(var.infraOverrideConfig.resources, init_container.key, lookup(init_container.value.resources, var.infrastructureSize, local.fallbackResources)).requests
limits = lookup(var.infraOverrideConfig.resources, init_container.key, lookup(init_container.value.resources, var.infrastructureSize, local.fallbackResources)).limits
}

dynamic "volume_mount" {
@@ -459,6 +457,7 @@ resource "kubernetes_cron_job_v1" "cronJob" {
mount_path = volume_mount.value.path
mount_propagation = volume_mount.value.propagation
name = volume_mount.key
read_only = volume_mount.value.readOnly
}
}

@@ -654,8 +653,8 @@ resource "kubernetes_cron_job_v1" "cronJob" {
}

resources {
requests = { for k, v in container.value.resources[local.infrastructureSize].requests : k => v == null ? null : "${regex(local.resourceMultiplierRegex, v)[0] * local.resourceMultiplier}${regex(local.resourceMultiplierRegex, v)[1]}" }
limits = { for k, v in container.value.resources[local.infrastructureSize].limits : k => v == null ? null : "${regex(local.resourceMultiplierRegex, v)[0] * local.resourceMultiplier}${regex(local.resourceMultiplierRegex, v)[1]}" }
requests = lookup(var.infraOverrideConfig.resources, container.key, lookup(container.value.resources, var.infrastructureSize, local.fallbackResources)).requests
limits = lookup(var.infraOverrideConfig.resources, container.key, lookup(container.value.resources, var.infrastructureSize, local.fallbackResources)).limits
}

dynamic "volume_mount" {
Expand Down Expand Up @@ -731,6 +730,7 @@ resource "kubernetes_cron_job_v1" "cronJob" {
mount_path = volume_mount.value.path
mount_propagation = volume_mount.value.propagation
name = volume_mount.key
read_only = volume_mount.value.readOnly
}
}

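Every container and init container template in these files now forwards a readOnly flag into its volume mounts. For volume_mount.value.readOnly to resolve, the mount objects in the pod variables need a matching field; a plausible fragment of that type, assuming the mounts are declared as an optional map on the container object (the variable name is chosen here for illustration, and only readOnly is new, the other fields mirror the attributes referenced above):

variable "containers" {
  # Assumed shape, not part of this diff; only the volumeMounts fragment matters here.
  type = map(object({
    image = string
    volumeMounts = optional(map(object({
      path        = string                  # -> mount_path
      propagation = optional(string, null)  # -> mount_propagation
      readOnly    = optional(bool, false)   # -> read_only, the attribute added in this change
    })), {})
  }))
}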
18 changes: 9 additions & 9 deletions 05_job.tf
@@ -10,7 +10,7 @@ resource "kubernetes_job_v1" "job" {
}

spec {
parallelism = var.podResourceTypeConfig.minReplicas
parallelism = var.infraOverrideConfig.replicas.min == null ? var.podResourceTypeConfig.minReplicas : var.infraOverrideConfig.replicas.min
backoff_limit = var.podResourceTypeConfig.backoffLimit
ttl_seconds_after_finished = var.podResourceTypeConfig.ttlSecondsAfterFinished
completions = var.podResourceTypeConfig.completions
@@ -23,9 +23,7 @@ resource "kubernetes_job_v1" "job" {

template {
metadata {
labels = merge(var.consistency.soft.labels, {
hash = sha1(base64encode(join("", concat(local.configVolumeHashData, local.configEnvHashData, local.secretVolumeHashData, local.secretEnvHashData, local.customCommandsHashData))))
})
labels = local.templateLabels
annotations = var.podResourceTypeConfig.podAnnotations
}

@@ -76,7 +74,7 @@ resource "kubernetes_job_v1" "job" {
topology_key = topology_spread_constraint.value.topologyKey
when_unsatisfiable = topology_spread_constraint.value.whenUnsatisfiable
label_selector {
match_labels = var.consistency.soft.matchLabels
match_labels = local.templateLabels
}
}
}
@@ -364,8 +362,8 @@ resource "kubernetes_job_v1" "job" {
}

resources {
requests = init_container.value.resources[local.infrastructureSize].requests
limits = init_container.value.resources[local.infrastructureSize].limits
requests = lookup(var.infraOverrideConfig.resources, init_container.key, lookup(init_container.value.resources, var.infrastructureSize, local.fallbackResources)).requests
limits = lookup(var.infraOverrideConfig.resources, init_container.key, lookup(init_container.value.resources, var.infrastructureSize, local.fallbackResources)).limits
}

dynamic "volume_mount" {
@@ -432,6 +430,7 @@ resource "kubernetes_job_v1" "job" {
mount_path = volume_mount.value.path
mount_propagation = volume_mount.value.propagation
name = volume_mount.key
read_only = volume_mount.value.readOnly
}
}

@@ -627,8 +626,8 @@ resource "kubernetes_job_v1" "job" {
}

resources {
requests = { for k, v in container.value.resources[local.infrastructureSize].requests : k => v == null ? null : "${regex(local.resourceMultiplierRegex, v)[0] * local.resourceMultiplier}${regex(local.resourceMultiplierRegex, v)[1]}" }
limits = { for k, v in container.value.resources[local.infrastructureSize].limits : k => v == null ? null : "${regex(local.resourceMultiplierRegex, v)[0] * local.resourceMultiplier}${regex(local.resourceMultiplierRegex, v)[1]}" }
requests = lookup(var.infraOverrideConfig.resources, container.key, lookup(container.value.resources, var.infrastructureSize, local.fallbackResources)).requests
limits = lookup(var.infraOverrideConfig.resources, container.key, lookup(container.value.resources, var.infrastructureSize, local.fallbackResources)).limits
}

dynamic "volume_mount" {
@@ -695,6 +694,7 @@ resource "kubernetes_job_v1" "job" {
mount_path = volume_mount.value.path
mount_propagation = volume_mount.value.propagation
name = volume_mount.key
read_only = volume_mount.value.readOnly
}
}

3 changes: 3 additions & 0 deletions 06_pvc.tf
@@ -25,6 +25,9 @@ resource "kubernetes_persistent_volume_claim_v1" "pvc" {
}

lifecycle {
ignore_changes = [
spec
]
prevent_destroy = true
}
}
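The PVC's new ignore_changes complements the existing prevent_destroy: almost every PersistentVolumeClaim spec field is immutable after creation (only the storage request may grow, and only through the Kubernetes API), so ignoring spec stops Terraform from planning updates it could never apply, while prevent_destroy keeps a refactor from deleting the claim and its data. An annotated copy of the block, with the comments added here for explanation only:

lifecycle {
  # PVC spec is effectively immutable in Kubernetes; ignoring it avoids
  # perpetual diffs when the live spec drifts from the original configuration,
  # for example after an out-of-band volume expansion.
  ignore_changes = [
    spec
  ]
  # The claim backs persistent data; any plan that would replace or delete it fails.
  prevent_destroy = true
}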
2 changes: 2 additions & 0 deletions 14_service.tf
@@ -47,6 +47,7 @@ variable "service" {
ip = optional(string, null)
publishNotReadyAddresses = optional(bool, false)
sessionAffinity = optional(string, "None")
internalTrafficPolicy = optional(string, "Cluster")
annotations = optional(map(string), {})
remapPorts = optional(map(string), {})
}), {})
@@ -86,6 +87,7 @@ resource "kubernetes_service_v1" "clusterIp" {
cluster_ip = var.service.clusterIp.ip
publish_not_ready_addresses = var.service.clusterIp.publishNotReadyAddresses
session_affinity = var.service.clusterIp.sessionAffinity
internal_traffic_policy = var.service.clusterIp.internalTrafficPolicy

dynamic "port" {
for_each = local.serviceClusterIpPorts
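internalTrafficPolicy controls how the ClusterIP service routes traffic that originates inside the cluster: "Cluster" (the Kubernetes default, and the default exposed here) allows any ready endpoint, while "Local" restricts routing to endpoints on the same node as the client and drops the traffic when none exist there. A hypothetical module call opting into node-local routing; the module name and source are placeholders, other clusterIp fields keep their declared defaults, and the remaining required inputs are omitted for brevity:

module "app" {
  source = "../k8s-pod-resource"   # placeholder path

  service = {
    clusterIp = {
      # Only route in-cluster traffic to endpoints on the caller's node.
      internalTrafficPolicy = "Local"
    }
  }

  # ...other required module inputs omitted.
}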