diff --git a/roles/provision/README.md b/roles/provision/README.md
new file mode 100644
index 00000000..fcdc33a7
--- /dev/null
+++ b/roles/provision/README.md
@@ -0,0 +1,65 @@
+# Provision
+
+A role that provisions Cloudera-specific inventory.
+
+The role requires the following two files, which must be locatable by the enclosing play:
+
+* *hostvars.j2* - a Jinja macro that outputs a host's variables in a static inventory file
+* *instance_vars.j2* - a Jinja macro that outputs an instance's metadata, i.e. tags, in the provider
+
+These two Jinja macros _expect variables on the host_ that are assigned via the `add_host` call
+within the role. To set these variables, use the `module_defaults` assignment within the enclosing
+play of the role.
+
+## Examples
+
+### module_defaults
+
+The `node` variable is in scope of the `add_host` module and contains the output of the Terraform
+node provisioning configuration.
+
+```yaml
+- name: Provision resources
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  module_defaults:
+    ansible.builtin.add_host:
+      groups: "{{ node.groups | default(omit) }}"
+      host_template: "{{ node.metadata.host_template | default(omit) }}"
+      storage_volumes: "{{ node.storage_volumes | default([]) }}"
+      tls: "{{ node.metadata.tls | default(omit) }}"
+  tasks: ...
+```
+
+### hostvars.j2
+
+```jinja
+{# Collect and output individual host variables #}
+{% macro host_variables(host) %}
+{% set fields = [] %}
+{% set _ = fields.append("ansible_user=" + host['ansible_user']) if 'ansible_user' in host %}
+{% set _ = fields.append("host_template=" + host['host_template']) if 'host_template' in host %}
+{% set _ = fields.append("label=" + host['label']) if 'label' in host %}
+{% set _ = fields.append("tls=" + host['tls'] | string) if 'tls' in host %}
+{{ host['inventory_hostname'] }} {{ fields | join(' ') }}
+{%- endmacro %}
+```
+
+### instance_vars.j2
+
+```jinja
+{# Define the metadata tags for the individual OpenStack instances #}
+{# Output should be TF map _entries_, not a map itself #}
+
+{% macro instance_tags(host) %}
+{% set tags = {} %}
+{% set _ = tags.update({ 'ansible_user': host.ansible_user }) if host.ansible_user is defined %}
+{% set _ = tags.update({ 'host_template': host.host_template }) if host.host_template is defined %}
+{% set _ = tags.update({ 'groups': host.groups | join(', ') }) if host.groups is defined %}
+{% set _ = tags.update({ 'tls': host.tls | string }) if host.tls is defined %}
+{% for k, v in tags.items() %}
+  {{ k }} = "{{ v }}"{{ "," if not loop.last else "" }}
+{% endfor %}
+{%- endmacro %}
+```
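+
+### Invoking the role
+
+A minimal sketch of an enclosing play; the FQCN and all values below are placeholders,
+and any variable not set falls back to the role defaults in `defaults/main.yml`:
+
+```yaml
+- name: Provision AWS inventory
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+    - name: Run the provision role
+      ansible.builtin.import_role:
+        name: cloudera.exe.provision
+      vars:
+        provision_inventory_file: inventory_static.ini
+        provision_name_prefix: example
+        provision_domain_suffix: internal.example.com
+        provision_ssh_keypair_label: example-keypair
+        provision_ssh_keypair_public_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
+        provision_owner_email: user@example.com
+        provision_aws_ec2_region: us-east-2
+        provision_instances: []
+```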
diff --git a/roles/provision/defaults/main.yml b/roles/provision/defaults/main.yml
new file mode 100644
index 00000000..389024d2
--- /dev/null
+++ b/roles/provision/defaults/main.yml
@@ -0,0 +1,31 @@
+---
+provision_state: present # absent
+provision_provider: aws # aws, etc.
+provision_directory: tf_deployment
+
+provision_inventory_file: "{{ undef(hint='Static inventory file') }}" # inventory_static.ini
+
+# provision_terraform_parallelism:
+provision_state_storage: local # remote_s3
+# provision_remote_storage_s3_region:
+# provision_remote_storage_s3_bucket:
+provision_create_remote_storage: false
+
+provision_name_prefix: "{{ undef(hint='Deployment name prefix') }}"
+provision_domain_suffix: "{{ undef(hint='DNS domain suffix') }}"
+provision_ssh_keypair_label: "{{ undef(hint='SSH keypair label') }}"
+provision_ssh_keypair_public_key: "{{ undef(hint='SSH keypair public key text') }}"
+provision_owner_email: "{{ undef(hint='Resource owner email') }}"
+provision_tags: {}
+
+provision_aws_ec2_region: "{{ undef(hint='AWS EC2 region') }}"
+#provision_aws_ec2_default_ami_filters: "{{ undef(hint='AWS EC2 filters for default AMI') }}"
+#provision_aws_ec2_default_ami_owners: "{{ undef(hint='AWS EC2 AMI owner filter') }}"
+#provision_aws_ec2_vpc_name:
+provision_aws_ec2_vpc_enable_dns_support: true
+provision_aws_ec2_vpc_enable_dns_hostnames: true
+#provision_aws_ec2_public_subnets:
+#provision_aws_ec2_private_subnets:
+
+#provision_default_instance_user:
+provision_instances: "{{ undef(hint='Instance definitions') }}"
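+
+# An illustrative shape for 'provision_instances' (a sketch only; the keys
+# mirror what templates/aws.tfvars.j2 and files/aws/nodes.tf consume):
+# provision_instances:
+#   - hostname_prefix: master
+#     count: 1
+#     instance_type: m5.xlarge
+#     subnet_index: 0
+#     volumes:
+#       - device: /dev/sdf
+#         mount: /data
+#         size: 100
+#         type: gp2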
diff --git a/roles/provision/files/aws/keypair.tf b/roles/provision/files/aws/keypair.tf
new file mode 100644
index 00000000..91e38a96
--- /dev/null
+++ b/roles/provision/files/aws/keypair.tf
@@ -0,0 +1,31 @@
+variable "ssh_keypair_name" {
+  type        = string
+  description = "AWS SSH key pair name"
+  validation {
+    condition     = length(var.ssh_keypair_name) > 4
+    error_message = "The SSH key pair name must be greater than 4 characters."
+  }
+}
+
+variable "ssh_keypair_public_key_text" {
+  type        = string
+  description = "AWS SSH key pair public key text"
+  validation {
+    condition     = length(var.ssh_keypair_public_key_text) > 0
+    error_message = "The SSH key pair public key text must not be empty."
+  }
+}
+
+resource "aws_key_pair" "deployment_keypair" {
+  key_name   = var.ssh_keypair_name
+  public_key = var.ssh_keypair_public_key_text
+}
+
+output "ssh_keypair" {
+  value = {
+    name        = aws_key_pair.deployment_keypair.key_name
+    public_key  = var.ssh_keypair_public_key_text
+    fingerprint = aws_key_pair.deployment_keypair.fingerprint
+  }
+  description = "Deployment SSH keypair"
+}
\ No newline at end of file
diff --git a/roles/provision/files/aws/network.tf b/roles/provision/files/aws/network.tf
new file mode 100644
index 00000000..4fefb218
--- /dev/null
+++ b/roles/provision/files/aws/network.tf
@@ -0,0 +1,269 @@
+variable "cluster_prefix" {
+  type        = string
+  description = "Deployment cluster prefix"
+  validation {
+    condition     = length(var.cluster_prefix) < 9 && length(var.cluster_prefix) > 4
+    error_message = "The deployment cluster prefix must be between 5-8 characters."
+  }
+}
+
+variable "vpc_name" {
+  type        = string
+  description = "VPC name"
+  default     = null
+}
+
+variable "vpc_cidr" {
+  type        = string
+  description = "VPC CIDR block"
+  default     = "172.16.0.0/16"
+}
+
+variable "vpc_enable_dns_support" {
+  type        = bool
+  description = "VPC DNS Support"
+  default     = true
+}
+
+variable "vpc_enable_dns_hostnames" {
+  type        = bool
+  description = "VPC DNS Hostnames"
+  default     = true
+}
+
+variable "igw_name" {
+  type        = string
+  description = "Internet Gateway name"
+  default     = null
+}
+
+variable "public_subnets" {
+  type = list(object({
+    az         = string
+    name       = optional(string)
+    cidr       = optional(string)
+    cidr_range = optional(number, 4)
+    tags       = optional(map(string), {})
+  }))
+
+  description = "List of public subnets"
+  default     = []
+}
+
+variable "public_route_table_name" {
+  type        = string
+  description = "Public route table name"
+  default     = null
+}
+
+variable "private_subnets" {
+  type = list(object({
+    az         = string
+    name       = optional(string)
+    cidr       = optional(string)
+    cidr_range = optional(number, 4)
+    tags       = optional(map(string), {})
+  }))
+
+  description = "List of private subnets"
+  default     = []
+}
+
+variable "nat_gateway_name" {
+  type        = string
+  description = "NAT gateway name"
+  default     = null
+}
+
+variable "private_route_table_name" {
+  type        = string
+  description = "Private route table name"
+  default     = null
+}
+
+# Security Groups
+variable "security_group_default_name" {
+  type        = string
+  description = "Default Security Group name"
+  default     = null
+}
+
+variable "security_group_default_desc" {
+  type        = string
+  description = "Default Security Group description"
+  default     = null
+}
+
+variable "security_group_rules_ingress" {
+  type = list(object({
+    cidr      = list(string)
+    from_port = string
+    to_port   = string
+    protocol  = string
+  }))
+
+  description = "Ingress rules for default Security Group"
+  default     = []
+}
+
+# ------- Virtual Network -------
+resource "aws_vpc" "cluster" {
+  cidr_block           = var.vpc_cidr
+  tags                 = { Name = var.vpc_name != null ? var.vpc_name : var.cluster_prefix }
+  instance_tenancy     = "default"
+  enable_dns_support   = var.vpc_enable_dns_support
+  enable_dns_hostnames = var.vpc_enable_dns_hostnames
+}
+
+# ------- Public Network infrastructure -------
+resource "aws_internet_gateway" "igw" {
+  vpc_id = aws_vpc.cluster.id
+  tags   = { Name = var.igw_name != null ? var.igw_name : var.cluster_prefix }
+}
+
+locals {
+  public_subnets = [
+    for idx, subnet in var.public_subnets :
+    merge(subnet, { name = subnet.name != null ? subnet.name : "${var.cluster_prefix}-public-${format("%02d", idx + 1)}" })
+  ]
+
+  private_subnets = [
+    for idx, subnet in var.private_subnets :
+    merge(subnet, { name = subnet.name != null ? subnet.name : "${var.cluster_prefix}-private-${format("%02d", idx + 1)}" })
+  ]
+
+  cidr_allocation = zipmap(
+    [for subnet in concat(local.public_subnets, local.private_subnets) : subnet.name if subnet.cidr == null],
+    cidrsubnets(var.vpc_cidr, [for subnet in concat(local.public_subnets, local.private_subnets) : subnet.cidr_range if subnet.cidr == null]...)
+  )
+}
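+
+# How the automatic CIDR allocation above works: 'cidrsubnets' carves one new
+# block out of var.vpc_cidr for every subnet that does not declare an explicit
+# 'cidr', sized by that subnet's 'cidr_range' (newbits); 'zipmap' then keys the
+# generated blocks by subnet name so the subnet resources below can look them
+# up. For example, with the default vpc_cidr of 172.16.0.0/16 and a cidr_range
+# of 4, each generated subnet is a /20.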
+
+resource "aws_subnet" "public_subnets" {
+  for_each = { for idx, subnet in local.public_subnets : idx => subnet }
+
+  vpc_id                  = aws_vpc.cluster.id
+  cidr_block              = each.value.cidr != null ? each.value.cidr : local.cidr_allocation[each.value.name]
+  map_public_ip_on_launch = true
+  availability_zone       = each.value.az
+  tags                    = merge(each.value.tags, { Name = each.value.name })
+}
+
+resource "aws_default_route_table" "public_route_table" {
+  default_route_table_id = aws_vpc.cluster.default_route_table_id
+
+  route {
+    cidr_block = "0.0.0.0/0"
+    gateway_id = aws_internet_gateway.igw.id
+  }
+
+  tags = { Name = var.public_route_table_name != null ? var.public_route_table_name : "${var.cluster_prefix}-public" }
+}
+
+resource "aws_route_table_association" "public_subnets" {
+  for_each = aws_subnet.public_subnets
+
+  subnet_id      = each.value.id
+  route_table_id = aws_vpc.cluster.default_route_table_id
+}
+
+# ------- Private Network infrastructure -------
+resource "aws_subnet" "private_subnets" {
+  for_each = { for idx, subnet in local.private_subnets : idx => subnet }
+
+  vpc_id                  = aws_vpc.cluster.id
+  cidr_block              = each.value.cidr != null ? each.value.cidr : local.cidr_allocation[each.value.name]
+  map_public_ip_on_launch = false
+  availability_zone       = each.value.az
+  tags                    = merge(each.value.tags, { Name = each.value.name })
+}
+
+resource "aws_eip" "nat_gateway_eip" {
+  for_each = aws_subnet.private_subnets
+
+  vpc  = true
+  tags = merge(each.value.tags, { Name = "${var.cluster_prefix}-${each.value.tags_all["Name"]}-nat" })
+}
+
+resource "aws_nat_gateway" "private_subnets" {
+  for_each = aws_subnet.private_subnets
+
+  subnet_id         = aws_subnet.public_subnets[each.key % length(aws_subnet.public_subnets)].id
+  allocation_id     = aws_eip.nat_gateway_eip[each.key].id
+  connectivity_type = "public"
+
+  tags = { Name = each.value.tags_all["Name"] != null ? each.value.tags_all["Name"] : "${var.cluster_prefix}-nat-${format("%02d", each.key + 1)}" }
+}
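+
+# Note: each NAT gateway above is placed into a public subnet chosen
+# round-robin (each.key % number of public subnets), so defining any private
+# subnet requires at least one public subnet to exist.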
+
+resource "aws_route_table" "private_route_table" {
+  for_each = aws_nat_gateway.private_subnets
+
+  vpc_id = aws_vpc.cluster.id
+
+  route {
+    cidr_block     = "0.0.0.0/0"
+    nat_gateway_id = each.value.id
+  }
+
+  tags = { Name = each.value.tags_all["Name"] }
+}
+
+resource "aws_route_table_association" "private_subnets" {
+  for_each = aws_subnet.private_subnets
+
+  subnet_id      = each.value.id
+  route_table_id = aws_route_table.private_route_table[each.key].id
+}
+
+# ------- Security Groups -------
+locals {
+  default_sg_name = var.security_group_default_name != null ? var.security_group_default_name : "${var.cluster_prefix}-default"
+}
+
+resource "aws_security_group" "default_sg" {
+  vpc_id      = aws_vpc.cluster.id
+  name        = local.default_sg_name
+  description = var.security_group_default_desc != null ? var.security_group_default_desc : local.default_sg_name
+
+  tags = { Name = local.default_sg_name }
+
+  # Intra-SG communication
+  ingress {
+    from_port = 0
+    to_port   = 0
+    protocol  = "all"
+    self      = true
+  }
+  dynamic "ingress" {
+    for_each = var.security_group_rules_ingress
+
+    content {
+      cidr_blocks = ingress.value.cidr
+      from_port   = ingress.value.from_port
+      to_port     = ingress.value.to_port
+      protocol    = ingress.value.protocol
+    }
+  }
+
+  egress {
+    cidr_blocks = ["0.0.0.0/0"]
+    from_port   = 0
+    to_port     = 0
+    protocol    = "all"
+  }
+}
+
+output "vpc" {
+  value = {
+    cidr = aws_vpc.cluster.cidr_block
+  }
+}
+
+output "subnets" {
+  value = {
+    public  = ["TBD"]
+    private = ["TBD"]
+  }
+
+  description = "Network infrastructure"
+}
diff --git a/roles/provision/files/aws/nodes.tf b/roles/provision/files/aws/nodes.tf
new file mode 100644
index 00000000..e31d8d67
--- /dev/null
+++ b/roles/provision/files/aws/nodes.tf
@@ -0,0 +1,184 @@
+variable "nodes" {
+  type = list(object({
+    name = string
+    ami_filters = optional(map(list(string)), {
+      name         = ["RHEL-8.6*"]
+      architecture = ["x86_64"]
+    })
+    ami_owners    = optional(list(string), ["309956199498"])
+    ami_user      = optional(string, "ec2-user")
+    instance_type = optional(string, "m5.xlarge")
+    subnet_index  = optional(number, 0)
+    elastic_ip    = optional(bool, false)
+    private_ip    = optional(string, null)
+    tags          = optional(map(string))
+    root_volume = optional(object({
+      delete_on_termination = optional(bool, true)
+      volume_size           = optional(number, 30)
+      volume_type           = optional(string, "gp2")
+    }), {})
+    volumes = optional(list(object({
+      device_name = string
+      mount       = string
+      volume_size = optional(number, 100)
+      volume_type = optional(string, "gp2")
+      tags        = optional(map(string), {})
+    })), [])
+  }))
+
+  description = "List of infrastructure nodes"
+  default     = []
+}
+
+# ------- Inventory Nodes -------
+data "aws_ami" "images" {
+  for_each = { for idx, node in var.nodes : idx => node }
+
+  most_recent = true
+
+  owners = each.value.ami_owners
+  dynamic "filter" {
+    for_each = each.value.ami_filters
+
+    content {
+      name   = filter.key
+      values = filter.value
+    }
+  }
+}
+
+locals {
+  existing_subnets = concat(values(aws_subnet.public_subnets), values(aws_subnet.private_subnets))
+
+  volumes = flatten([
+    for idx, node in var.nodes :
+    [
+      for node_vol in node.volumes :
+      {
+        node_index   = idx
+        name         = node.name
+        device       = node_vol.device_name
+        mount        = node_vol.mount
+        size         = node_vol.volume_size
+        type         = node_vol.volume_type
+        subnet_index = node.subnet_index
+      }
+    ]
+  ])
+}
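+
+# 'local.volumes' above flattens every node's volume list into one list whose
+# entries carry 'node_index' back to the owning node; the volume and
+# attachment resources below are keyed by position in this flattened list.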
+
+resource "aws_instance" "inventory" {
+  for_each = { for idx, node in var.nodes : idx => node }
+
+  # TODO Find security group(s) by name
+  vpc_security_group_ids = [aws_security_group.default_sg.id]
+  key_name               = aws_key_pair.deployment_keypair.key_name
+  instance_type          = each.value.instance_type
+  ami                    = data.aws_ami.images[each.key].id
+  ebs_optimized          = true
+
+  # TODO Alternatively, render by looking up the subnet name
+  subnet_id  = local.existing_subnets[each.value.subnet_index].id
+  private_ip = each.value.private_ip
+
+  root_block_device {
+    delete_on_termination = each.value.root_volume.delete_on_termination
+    volume_size           = each.value.root_volume.volume_size
+    volume_type           = each.value.root_volume.volume_type
+  }
+
+  associate_public_ip_address = local.existing_subnets[each.value.subnet_index].map_public_ip_on_launch
+
+  tags = merge(each.value.tags, { Name = each.value.name })
+
+  lifecycle {
+    precondition {
+      condition     = length(var.public_subnets) > 0 || length(var.private_subnets) > 0
+      error_message = "Unable to provision, no subnets available. You must define at least one subnet, public or private."
+    }
+  }
+}
+
+resource "aws_eip" "inventory" {
+  for_each = { for idx, node in var.nodes : idx => node if node.elastic_ip }
+
+  instance = aws_instance.inventory[each.key].id
+  vpc      = true
+  tags = {
+    Name = each.value.name
+  }
+
+  depends_on = [
+    aws_internet_gateway.igw
+  ]
+}
+
+resource "aws_ebs_volume" "inventory" {
+  for_each = { for idx, volume in local.volumes : idx => volume }
+
+  availability_zone = local.existing_subnets[each.value.subnet_index].availability_zone
+  size              = each.value.size
+  type              = each.value.type
+  tags = {
+    Name  = "${each.value.name}: ${each.value.device}"
+    mount = each.value.mount
+  }
+  #encrypted ...
+}
+
+resource "aws_volume_attachment" "inventory" {
+  for_each = { for idx, volume in local.volumes : idx => volume }
+
+  device_name = each.value.device
+  volume_id   = aws_ebs_volume.inventory[index(local.volumes, each.value)].id
+  instance_id = aws_instance.inventory[each.value.node_index].id
+}
+
+# ------- Construct outputs for details of node and storage volumes -------
+locals {
+  # Details for all attached volumes
+  attached_volumes = [
+    for idx, volume in local.volumes :
+    {
+      "vol_name" = aws_ebs_volume.inventory[index(local.volumes, volume)].tags["Name"]
+      "vol_id"   = aws_volume_attachment.inventory[index(local.volumes, volume)].volume_id
+      "instance" = aws_volume_attachment.inventory[index(local.volumes, volume)].instance_id
+      "device"   = aws_volume_attachment.inventory[index(local.volumes, volume)].device_name
+      "mount"    = aws_ebs_volume.inventory[index(local.volumes, volume)].tags["mount"]
+    }
+    if length(aws_ebs_volume.inventory) > 0
+  ]
+
+  # Attached volume details grouped by instance
+  attached_volumes_by_instance = {
+    for vol in local.attached_volumes :
+    vol.instance =>
+    {
+      vol_name = vol.vol_name
+      vol_id   = vol.vol_id
+      device   = vol.device
+      mount    = vol.mount
+    }...
+  }
+}
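+
+# The trailing '...' in the expression above enables Terraform's grouping
+# mode, collecting every volume map that shares the same instance ID into a
+# list per instance.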
+
+output "nodes" {
+  value = [
+    for idx, v in aws_instance.inventory :
+    {
+      # The top-level keys are used by 'add_host' within the provision role;
+      # use the nested 'metadata' tags within a 'module_defaults' declaration
+      # to add additional, ad-hoc 'add_host' variables
+      "id"              = v.id
+      "label"           = v.tags["Name"]
+      "hostname"        = v.tags["hostname"]
+      "instance_user"   = var.nodes[idx].ami_user
+      "ipv4"            = lookup(aws_eip.inventory, idx, "") != "" ? aws_eip.inventory[idx].public_ip : v.public_ip != "" ? v.public_ip : v.private_ip
+      "groups"          = [for g in split(",", lookup(v.tags, "groups", "")) : trimspace(g) if g != ""]
+      "metadata"        = { for m, t in v.tags_all : m => t if !contains(["groups", "Name", "hostname"], m) }
+      "storage_volumes" = lookup(local.attached_volumes_by_instance, v.id, [])
+    }
+  ]

+  description = "Details of the provisioned inventory nodes."
+}
\ No newline at end of file
diff --git a/roles/provision/files/aws/provider.tf b/roles/provision/files/aws/provider.tf
new file mode 100644
index 00000000..d5bd6e20
--- /dev/null
+++ b/roles/provision/files/aws/provider.tf
@@ -0,0 +1,29 @@
+terraform {
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = "~> 4.0"
+    }
+  }
+}
+
+variable "region" {
+  type        = string
+  description = "AWS Region"
+}
+
+variable "env_tags" {
+  type        = map(string)
+  description = "Tags applied to provisioned resources"
+
+  default = {
+    comment = "Created with Terraform via cloudera.exe.provision"
+  }
+}
+
+provider "aws" {
+  region = var.region
+  default_tags {
+    tags = var.env_tags
+  }
+}
diff --git a/roles/provision/meta/main.yml b/roles/provision/meta/main.yml
new file mode 100755
index 00000000..1d1af1ca
--- /dev/null
+++ b/roles/provision/meta/main.yml
@@ -0,0 +1,41 @@
+# Copyright 2023 Cloudera, Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+galaxy_info:
+  author: Webster Mudge (wmudge@cloudera.com)
+  description: >
+    Provision Cloudera-specific inventory.
+  company: Cloudera
+  license: Apache-2.0
+
+  min_ansible_version: 2.10
+
+  platforms:
+    - name: Debian
+      versions: all
+    - name: Fedora
+      versions: all
+    - name: GenericLinux
+      versions: all
+    - name: MacOSX
+      versions: all
+    - name: Ubuntu
+      versions: all
+
+  galaxy_tags:
+    - storage
+    - mount
+    - cdp
+    - aws
+    - openstack
diff --git a/roles/provision/tasks/absent.yml b/roles/provision/tasks/absent.yml
new file mode 100644
index 00000000..dca355b3
--- /dev/null
+++ b/roles/provision/tasks/absent.yml
@@ -0,0 +1,22 @@
+---
+
+- name: Examine the local Terraform project directory
+  ansible.builtin.stat:
+    path: "{{ provision_directory }}"
+  register: tf_directory
+
+- name: Terraform 'destroy' the resources
+  when: tf_directory.stat.exists
+  community.general.terraform:
+    project_path: "{{ provision_directory }}/"
+    state: absent
+
+- name: Remove resources for remote S3 state storage
+  when:
+    - provision_create_remote_storage
+    - provision_state_storage == 'remote_s3'
+  amazon.aws.s3_bucket:
+    region: "{{ provision_remote_storage_s3_region }}"
+    name: "{{ provision_remote_storage_s3_bucket }}"
+    state: absent
+    force: yes
diff --git a/roles/provision/tasks/main.yml b/roles/provision/tasks/main.yml
new file mode 100644
index 00000000..03d3394c
--- /dev/null
+++ b/roles/provision/tasks/main.yml
@@ -0,0 +1,4 @@
+---
+
+- name: Execute provisioning state
+  ansible.builtin.include_tasks: "{{ provision_state }}.yml"
\ No newline at end of file
diff --git a/roles/provision/tasks/present.yml b/roles/provision/tasks/present.yml
new file mode 100644
index 00000000..4cff9c59
--- /dev/null
+++ b/roles/provision/tasks/present.yml
@@ -0,0 +1,78 @@
+---
+
+- name: Set up the local Terraform project directory
+  ansible.builtin.file:
+    state: directory
+    path: "{{ provision_directory }}"
+
+- name: Create resources for remote S3 state storage
+  when:
+    - provision_create_remote_storage
+    - provision_state_storage == 'remote_s3'
+  amazon.aws.s3_bucket:
+    region: "{{ provision_remote_storage_s3_region }}"
+    name: "{{ provision_remote_storage_s3_bucket }}"
+    state: present
+    object_ownership: "BucketOwnerPreferred"
+
+- name: Copy the Terraform configuration files
+  ansible.builtin.copy:
+    dest: "{{ provision_directory }}"
+    src: "{{ provision_provider }}/"
+
+- name: Generate Terraform backend state
+  ansible.builtin.template:
+    src: 'backend_state.tf.j2'
+    dest: "{{ [provision_directory, 'backend_state.tf'] | path_join }}"
+
+- name: Generate Terraform variables file
+  ansible.builtin.template:
+    src: "{{ provision_provider }}.tfvars.j2"
+    dest: "{{ [provision_directory, 'terraform.tfvars'] | path_join }}"
+  no_log: false
+
+- name: Terraform 'apply' the resources
+  community.general.terraform:
+    project_path: "{{ provision_directory }}/"
+    parallelism: "{{ provision_terraform_parallelism | default(omit) }}"
+    state: present
+    force_init: yes
+  register: tf_result
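+
+# The JMESPath query below selects nodes whose 'groups' metadata contains
+# 'jump_host'; the first match (if any) becomes the SSH proxy for every other
+# host added to the in-memory inventory.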
+- name: Establish jump host IP address
+  ansible.builtin.set_fact:
+    jump_host_ip: "{{ tf_result.outputs.nodes.value | community.general.json_query(query) | map(attribute='ipv4') | first | default('') }}"
+    jump_host_user: "{{ tf_result.outputs.nodes.value | community.general.json_query(query) | map(attribute='instance_user') | first | default('') }}"
+  vars:
+    query: "[?contains(not_null(groups,''),'jump_host')]"
+
+- name: Add hosts to in-memory inventory
+  ansible.builtin.add_host:
+    name: "{{ [node.hostname, provision_name_prefix, provision_domain_suffix] | join('.') | default(node.label) }}"
+    groups: "{{ node.groups }}"
+    storage_volumes: "{{ node.storage_volumes }}"
+    ansible_host: "{{ node.ipv4 }}"
+    ansible_user: "{{ node.instance_user }}"
+    label: "{{ node.label }}"
+    ansible_timeout: "{{ (jump_host_ip is defined and (jump_host_ip | length > 0)) | ternary(60, omit) }}"
+    ansible_ssh_common_args: "{{ (jump_host_ip is defined and (jump_host_ip | length > 0)) | ternary(jump, omit) }}"
+  vars:
+    jump: -o ProxyCommand="ssh -o User={{ jump_host_user }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -q {{ jump_host_ip | default() }}"
+  loop: "{{ tf_result.outputs.nodes.value }}"
+  loop_control:
+    loop_var: node
+    label: "{{ node.label }}"
+
+- name: Create the static INI file from the in-memory inventory
+  ansible.builtin.template:
+    src: inventory.ini.j2
+    dest: "{{ provision_inventory_file }}"
+
+- name: Populate host variables with Terraform infrastructure details
+  ansible.builtin.set_fact:
+    provision: "{{ outputs }}"
+  vars:
+    outputs:
+      nodes: "{{ tf_result.outputs.nodes.value }}"
+      vpc: "{{ tf_result.outputs.vpc.value | default(omit) }}"
+      subnets: "{{ tf_result.outputs.subnets.value | default(omit) }}"
diff --git a/roles/provision/templates/aws.tfvars.j2 b/roles/provision/templates/aws.tfvars.j2
new file mode 100644
index 00000000..c8d8f493
--- /dev/null
+++ b/roles/provision/templates/aws.tfvars.j2
@@ -0,0 +1,160 @@
+# ------- AWS EC2 -------
+
+{% import 'instance_vars.j2' as inst with context %}
+# ------- TODO: Definition of input variables from a given configuration -------
+# ------- Global settings -------
+ssh_keypair_name            = "{{ provision_ssh_keypair_label }}"
+ssh_keypair_public_key_text = "{{ provision_ssh_keypair_public_key }}"
+
+region         = "{{ provision_aws_ec2_region }}"
+cluster_prefix = "{{ provision_name_prefix }}"
+env_tags = {
+{% for key, value in provision_tags.items() %}
+  {{ key }} = "{{ value }}"
+{% endfor %}
+}
+
+# ------- Network Resources -------
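+{# Settings left undefined here fall back to the Terraform variable defaults
+   in files/aws/network.tf (e.g. vpc_cidr defaults to 172.16.0.0/16). #}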
+{% if provision_aws_ec2_vpc_name is defined %}
+vpc_name = "{{ provision_aws_ec2_vpc_name }}"
+{% endif %}
+{% if provision_aws_ec2_vpc_cidr is defined %}
+vpc_cidr = "{{ provision_aws_ec2_vpc_cidr }}"
+{% endif %}
+{% if provision_aws_ec2_vpc_enable_dns_support is defined %}
+vpc_enable_dns_support = {{ provision_aws_ec2_vpc_enable_dns_support | bool | string | lower }}
+{% endif %}
+{% if provision_aws_ec2_vpc_enable_dns_hostnames is defined %}
+vpc_enable_dns_hostnames = {{ provision_aws_ec2_vpc_enable_dns_hostnames | bool | string | lower }}
+{% endif %}
+# igw_name =
+
+{% if provision_aws_ec2_public_subnets is defined %}
+public_subnets = [
+{% for subnet in provision_aws_ec2_public_subnets %}
+  {
+    az = "{{ subnet.az }}"
+{% if subnet.name is defined %}
+    name = "{{ subnet.name }}"
+{% endif %}
+{% if subnet.cidr is defined %}
+    cidr = "{{ subnet.cidr }}"
+{% endif %}
+{% if subnet.cidr_range is defined %}
+    cidr_range = "{{ subnet.cidr_range }}"
+{% endif %}
+{% if subnet.tags is defined %}
+    tags = {{ subnet.tags | to_json }}
+{% endif %}
+  },
+{% endfor %}
+]
+
+{% endif %}
+{% if provision_aws_ec2_private_subnets is defined %}
+private_subnets = [
+{% for subnet in provision_aws_ec2_private_subnets %}
+  {
+    az = "{{ subnet.az }}"
+{% if subnet.name is defined %}
+    name = "{{ subnet.name }}"
+{% endif %}
+{% if subnet.cidr is defined %}
+    cidr = "{{ subnet.cidr }}"
+{% endif %}
+{% if subnet.cidr_range is defined %}
+    cidr_range = "{{ subnet.cidr_range }}"
+{% endif %}
+{% if subnet.tags is defined %}
+    tags = {{ subnet.tags | to_json }}
+{% endif %}
+  },
+{% endfor %}
+]
+
+{% endif %}
+{% if provision_aws_ec2_default_security_group_ingress is defined %}
+security_group_rules_ingress = [
+{% for rule in provision_aws_ec2_default_security_group_ingress %}
+  {
+    cidr      = {{ rule.cidr | to_json }}
+    from_port = "{{ rule.from | string }}"
+    to_port   = "{{ rule.to | string }}"
+    protocol  = "{{ rule.protocol }}"
+  },
+{% endfor %}
+]
+
+{% endif %}
+
+# ------- Compute Resources -------
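+{# Each entry in provision_instances describes a host grouping; the loops
+   below expand it into 'count' numbered node objects (e.g. prefix-master-01). #}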
+{% if provision_instances is defined %}
+nodes = [
+{% for grouping in provision_instances %}
+{%- for node in range(1, (grouping.count | int) + 1) %}
+  {
+    name = "{{ [provision_name_prefix, grouping.hostname_prefix, '%02d' | format(node)] | join('-') }}"
+{% if grouping.ami_filters is defined or provision_aws_ec2_default_ami_filters is defined %}
+    ami_filters = {
+{% for k, v in (grouping.ami_filters | default(provision_aws_ec2_default_ami_filters)).items() %}
+      {{ k }} = {{ v | to_json }}{{ "," if not loop.last else "" }}
+{% endfor %}
+    }
+{% endif %}
+{% if grouping.ami_user is defined or provision_default_instance_user is defined %}
+    ami_user = "{{ grouping.ami_user | default(provision_default_instance_user) }}"
+{% endif %}
+{% if grouping.instance_type is defined %}
+    instance_type = "{{ grouping.instance_type }}"
+{% endif %}
+{% if grouping.elastic_ip is defined %}
+    elastic_ip = {{ grouping.elastic_ip | bool | string | lower }}
+{% endif %}
+{% if grouping.private_ip is defined %}
+    private_ip = "{{ grouping.private_ip }}"
+{% endif %}
+{% if grouping.subnet_index is defined %}
+    subnet_index = "{{ grouping.subnet_index }}"
+{% endif %}
+    tags = {
+      hostname = "{{ [grouping.hostname_prefix, '%02d' | format(node)] | join('-') }}",
+{{ inst.instance_tags(grouping) }}
+    }
+{% if grouping.root_volume is defined %}
+    root_volume = {
+{% if grouping.root_volume.delete_on_termination is defined %}
+      delete_on_termination = {{ grouping.root_volume.delete_on_termination | bool | string | lower }}
+{% endif %}
+{% if grouping.root_volume.volume_size is defined %}
+      volume_size = "{{ grouping.root_volume.volume_size }}"
+{% endif %}
+{% if grouping.root_volume.volume_type is defined %}
+      volume_type = "{{ grouping.root_volume.volume_type }}"
+{% endif %}
+    }
+{% endif %}
+{% if grouping.volumes is defined %}
+    volumes = [
+{% for volume in grouping.volumes %}
+      {
+        device_name = "{{ volume.device }}"
+        mount       = "{{ volume.mount }}"
+        tags = {
+          Name = "{{ [provision_name_prefix, grouping.hostname_prefix, ('%02d' | format(node)), volume.device] | join('-') }}"
+        }
+{% if volume.size is defined %}
+        volume_size = "{{ volume.size }}"
+{% endif %}
+{% if volume.type is defined %}
+        volume_type = "{{ volume.type }}"
+{% endif %}
+      },
+{% endfor %}
+    ]
+{% endif %}
+  },
+{% endfor -%}
+{% endfor %}
]
+{% endif %}
\ No newline at end of file
diff --git a/roles/provision/templates/backend_state.tf.j2 b/roles/provision/templates/backend_state.tf.j2
new file mode 100644
index 00000000..4602affc
--- /dev/null
+++ b/roles/provision/templates/backend_state.tf.j2
@@ -0,0 +1,11 @@
+{% if provision_state_storage == "local" %}
+# Terraform state is stored locally in {{ provision_directory }}
+{% elif provision_state_storage == "remote_s3" %}
+terraform {
+  backend "s3" {
+    region = "{{ provision_remote_storage_s3_region }}"
+    bucket = "{{ provision_remote_storage_s3_bucket }}"
+    key    = "{{ provision_name_prefix }}/terraform.tfstate"
+  }
+}
+{% endif %}
\ No newline at end of file
diff --git a/roles/provision/templates/inventory.ini.j2 b/roles/provision/templates/inventory.ini.j2
new file mode 100644
index 00000000..df1b9ddf
--- /dev/null
+++ b/roles/provision/templates/inventory.ini.j2
@@ -0,0 +1,27 @@
+# Generated by Ansible
+localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+
+{% import 'inventory_macros.j2' as inv with context %}
+{% import 'hostvars.j2' as inv_vars with context %}
+{% set hierarchy = inv.group_hierarchy() | from_json %}
+{% for group in hierarchy %}
+[{{ group }}]
+{% for host in groups[group] | difference(inv.group_hosts(hierarchy[group])) %}
+{{ host }}
+{% endfor %}
+
+{% if hierarchy[group] %}
+[{{ group }}:children]
+{% for child in hierarchy[group] %}
+{{ child }}
+{% endfor %}
+
+{% endif %}
+{% endfor %}
+{{ inv.all_variables() }}
+[all]
+{% for host in hostvars.keys() %}
+{% if host != 'localhost' %}
+{{ inv_vars.host_variables(hostvars[host]) }}
+{% endif %}
+{% endfor %}
\ No newline at end of file
diff --git a/roles/provision/templates/inventory_macros.j2 b/roles/provision/templates/inventory_macros.j2
new file mode 100644
index 00000000..a7b1c363
--- /dev/null
+++ b/roles/provision/templates/inventory_macros.j2
@@ -0,0 +1,57 @@
+{# Import using 'with context' #}
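+
+{# The macros below infer a parent/child group hierarchy from Ansible's flat
+   group membership lists: a group is treated as a child of another when its
+   hosts form a strict subset of the other group's hosts. #}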
+
+{# Construct the hierarchy of nested groups from flat membership lists #}
+{% macro group_hierarchy() %}
+{% set hierarchy = {} %}
+{% for group in groups if group not in ('all', 'ungrouped') %}
+{% set _ = hierarchy.update({ group: [] }) %}
+{% for child in groups %}
+{% if child not in ('all', 'ungrouped', group) and groups[child] is subset(groups[group]) and (groups[child] | length) != (groups[group] | length) %}
+{% set _ = hierarchy[group].append(child) %}
+{% endif %}
+{% endfor %}
+{% endfor %}
+{% set reduced_hierarchy = {} %}
+{% for group, children in hierarchy.items() %}
+{% set reduced_children = reduce(children) %}
+{% set _ = reduced_hierarchy.update({ group: reduced_children | from_json }) if reduced_children | length > 0 %}
+{% endfor %}
+{{ reduced_hierarchy | to_json }}
+{% endmacro %}
+
+{# Remove from a list of children any child that is a subset of another child in the list #}
+{% macro reduce(children) %}
+{% set supersets = {} %}
+{% for p in children | permutations(2) %}
+{% if groups[p[0]] is superset(groups[p[1]]) %}
+{% set _ = supersets.update({ p[1]: True }) %}
+{% endif %}
+{% endfor %}
+{{ children | difference(supersets.keys()) | to_json }}
+{% endmacro %}
+
+{# Collect and merge all the child hosts of a list of children groups #}
+{% macro group_hosts(children) %}
+{% set hosts = [] %}
+{% for child in children %}
+{% set _ = hosts.extend(groups[child]) %}
+{% endfor %}
+{{ hosts | unique | to_json }}
+{% endmacro %}
+
+{# Configure SSH connection options for dynamic hosts #}
+{% macro all_variables() %}
+{% set ssh_options = [] %}
+{% set _ = ssh_options.append("-o StrictHostKeyChecking=no") if ssh_strict_hostkey_checking is defined and not (ssh_strict_hostkey_checking | bool) %}
+{% set _ = ssh_options.append("-o UserKnownHostsFile=/dev/null") if ssh_recycle_hostnames is defined and (ssh_recycle_hostnames | bool) %}
+{% set _ = ssh_options.append("-o ProxyCommand=\"ssh -o User=" + jump_host_user + " -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -q " + jump_host_ip + "\"") if jump_host_ip is defined and (jump_host_ip | length > 0) %}
+{% set _ = ssh_options.append("-C -o ControlMaster=auto -o ControlPersist=1200s -o BatchMode=yes") if ssh_multiplex is defined and (ssh_multiplex | bool) %}
+[all:vars]
+ansible_python_interpreter=auto
+{% if ssh_options | length > 0 -%}
+ansible_ssh_common_args='{{ ssh_options | join(" ") }}'
+{% if jump_host_ip is defined -%}
+ansible_timeout=60
+{% endif %}
+{% endif %}
+{% endmacro %}
\ No newline at end of file