diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..31b9d30f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+.idea/workspace.xml
+.DS_Store
+*.tfstate
+.terraform/
+terraform.tfvars
diff --git a/README.md b/README.md
index 1011c0c5..6d161eae 100644
--- a/README.md
+++ b/README.md
@@ -35,6 +35,40 @@ To remove the VM's that have been deployed run `terraform destroy --force`
 
 **Please be aware that you will be responsible for the usage charges with Digital Ocean**
 
+## vSphere quick start
+
+The vsphere folder contains Terraform code to stand up a single Rancher server instance with a 3 node cluster attached to it.
+
+This Terraform setup will:
+
+- Create a VM in vSphere running the `rancher/rancher` version specified in `rancher_version`
+- Create a custom cluster called `qs-cluster`
+- Start `rancher_num_cluster_nodes` VMs in vSphere and add them to the custom cluster with all roles
+
+### How to use
+
+#### Create RancherOS template
+
+The Terraform setup creates VMs by cloning a RancherOS template that must be made available in vCenter:
+
+1. Download the RancherOS OVA appliance from https://transfer.sh/FZfU3/rancheros-v1.4.0-vapp.ova
+2. Import the OVA file by right-clicking on a cluster or host in the inventory and selecting "Deploy OVF template...".
+3. Mark the resulting VM as a template -> "Convert to template".
+4. Note the name/path of the template.
+
+#### Run the Terraform code
+
+1. Clone this repository and go into the `vsphere` subfolder
+2. Move the file `terraform.tfvars.example` to `terraform.tfvars` and edit it (see inline explanation)
+3. Run `terraform init`
+4. Run `terraform apply`
+
+When provisioning has finished, you will be given the URL to connect to the Rancher server.
+
+### How to Remove
+
+To remove the VMs that have been deployed, run `terraform destroy --force`
+
 ## Vagrant quick start
 
 The vagrant folder contains a vagrant code to stand up a single Rancher server instance with a 3 node cluster attached to it.
diff --git a/vsphere/cloud_config.tf b/vsphere/cloud_config.tf
new file mode 100755
index 00000000..7ea94f9c
--- /dev/null
+++ b/vsphere/cloud_config.tf
@@ -0,0 +1,23 @@
+# Renders the cloud-config file for the server
+data "template_file" "server" {
+  template = "${file("${path.module}/files/cloud-config-server.tpl")}"
+
+  vars {
+    authorized_key  = "${tls_private_key.provisioning_key.public_key_openssh}"
+    hostname        = "${local.server_name_prefix}"
+    docker_version  = "${var.node_docker_version}"
+    rancher_version = "${var.rancher_version}"
+  }
+}
+
+# Renders the cloud-config file for the cluster nodes
+data "template_file" "cluster" {
+  template = "${file("${path.module}/files/cloud-config-cluster.tpl")}"
+  count    = "${var.rancher_num_cluster_nodes}"
+
+  vars {
+    authorized_key = "${tls_private_key.provisioning_key.public_key_openssh}"
+    hostname       = "${local.cluster_nodes_name_prefix}-${count.index + 1}"
+    docker_version = "${var.node_docker_version}"
+  }
+}
\ No newline at end of file
diff --git a/vsphere/data_sources.tf b/vsphere/data_sources.tf
new file mode 100755
index 00000000..82aa2c05
--- /dev/null
+++ b/vsphere/data_sources.tf
@@ -0,0 +1,32 @@
+data "vsphere_datacenter" "dc" {
+  name = "${var.vsphere_datacenter}"
+}
+
+# Use count with ternary operator for conditional fetching of this data source
+data "vsphere_compute_cluster" "cluster" {
+  count         = "${var.vsphere_cluster != "" ? 1 : 0}"
+  name          = "${var.vsphere_cluster}"
+  datacenter_id = "${data.vsphere_datacenter.dc.id}"
+}
+
+# Use count with ternary operator for conditional fetching of this data source
+data "vsphere_host" "host" {
+  count         = "${var.vsphere_host != "" ? 1 : 0}"
+  name          = "${var.vsphere_host}"
+  datacenter_id = "${data.vsphere_datacenter.dc.id}"
+}
+
+data "vsphere_datastore" "datastore" {
+  name          = "${var.vsphere_datastore}"
+  datacenter_id = "${data.vsphere_datacenter.dc.id}"
+}
+
+data "vsphere_network" "network" {
+  name          = "${var.vsphere_network}"
+  datacenter_id = "${data.vsphere_datacenter.dc.id}"
+}
+
+data "vsphere_virtual_machine" "template" {
+  name          = "${var.vsphere_template}"
+  datacenter_id = "${data.vsphere_datacenter.dc.id}"
+}
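The two "Use count with ternary operator" comments above, together with the matching `join()` calls in `locals.tf` further down, work around a Terraform 0.11 limitation: both branches of a conditional are always evaluated, so directly indexing into a data source that was disabled with `count = 0` would fail. A minimal sketch of the pattern, for illustration only and not part of the change (the commented-out line shows the naive form that breaks when only `vsphere_host` is set):

```hcl
locals {
  # Naive form: fails in Terraform 0.11 whenever vsphere_cluster is empty,
  # because the unused branch is still evaluated and element 0 of an empty
  # splat list does not exist.
  # pool_id = "${var.vsphere_cluster != "" ? data.vsphere_compute_cluster.cluster.0.resource_pool_id : data.vsphere_host.host.0.resource_pool_id}"

  # Workaround used by this change: join() collapses each splat to a plain
  # string, so the branch whose data source has count = 0 simply yields "".
  pool_id = "${var.vsphere_cluster != "" ? join("", data.vsphere_compute_cluster.cluster.*.resource_pool_id) : join("", data.vsphere_host.host.*.resource_pool_id)}"
}
```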
diff --git a/vsphere/files/agent_bootstrap.sh b/vsphere/files/agent_bootstrap.sh
new file mode 100644
index 00000000..09cb20a1
--- /dev/null
+++ b/vsphere/files/agent_bootstrap.sh
@@ -0,0 +1,104 @@
+#!/bin/sh
+set -x
+
+curlimage=appropriate/curl
+jqimage=stedolan/jq
+cluster_name=qs-cluster
+
+if [ $# -lt 2 ]; then
+  echo "required argument missing"
+  exit 1
+fi
+
+rancher_server_ip=$1
+rancher_password=$2
+
+for image in $curlimage $jqimage; do
+  until docker inspect $image > /dev/null 2>&1; do
+    docker pull $image
+    sleep 2
+  done
+done
+
+while true; do
+  docker run --rm $curlimage -sLk https://$rancher_server_ip/ping && break
+  sleep 5
+done
+
+# Login
+while true; do
+
+  LOGINRESPONSE=$(docker run \
+    --rm \
+    $curlimage \
+    -s "https://$rancher_server_ip/v3-public/localProviders/local?action=login" -H 'content-type: application/json' --data-binary '{"username":"admin","password":"'$rancher_password'"}' --insecure)
+  LOGINTOKEN=$(echo $LOGINRESPONSE | docker run --rm -i $jqimage -r .token)
+
+  if [ "$LOGINTOKEN" != "null" ]; then
+    break
+  else
+    sleep 5
+  fi
+done
+
+# Get the agent image from the Rancher server
+while true; do
+  AGENTIMAGE=$(docker run \
+    --rm \
+    $curlimage \
+    -sLk \
+    -H "Authorization: Bearer $LOGINTOKEN" \
+    "https://$rancher_server_ip/v3/settings/agent-image" | docker run --rm -i $jqimage -r '.value')
+
+  if [ -n "$AGENTIMAGE" ]; then
+    break
+  else
+    sleep 5
+  fi
+done
+
+until docker inspect $AGENTIMAGE > /dev/null 2>&1; do
+  docker pull $AGENTIMAGE
+  sleep 2
+done
+
+# Test if cluster is created
+while true; do
+  CLUSTERID=$(docker run \
+    --rm \
+    $curlimage \
+    -sLk \
+    -H "Authorization: Bearer $LOGINTOKEN" \
+    "https://$rancher_server_ip/v3/clusters?name=$cluster_name" | docker run --rm -i $jqimage -r '.data[].id')
+
+  if [ -n "$CLUSTERID" ]; then
+    break
+  else
+    sleep 5
+  fi
+done
+
+ROLEFLAG="all-roles"
+
+# Get token
+# Wait for the node registration command to become available
+while true; do
+  AGENTCMD=$(docker run \
+    --rm \
+    $curlimage \
+    -sLk \
+    -H "Authorization: Bearer $LOGINTOKEN" \
+    "https://$rancher_server_ip/v3/clusterregistrationtoken?clusterId=$CLUSTERID" | docker run --rm -i $jqimage -r '.data[].nodeCommand' | head -1)
+
+  if [ -n "$AGENTCMD" ]; then
+    break
+  else
+    sleep 5
+  fi
+done
+
+# Combine command and flags
+COMPLETECMD="$AGENTCMD --$ROLEFLAG"
+
+# Run command
+$COMPLETECMD
diff --git a/vsphere/files/cloud-config-cluster.tpl b/vsphere/files/cloud-config-cluster.tpl
new file mode 100644
index 00000000..75e08be1
--- /dev/null
+++ b/vsphere/files/cloud-config-cluster.tpl
@@ -0,0 +1,7 @@
+#cloud-config
+hostname: ${hostname}
+ssh_authorized_keys:
+  - ${authorized_key}
+rancher:
+  docker:
+    engine: docker-${docker_version}
diff --git a/vsphere/files/cloud-config-server.tpl b/vsphere/files/cloud-config-server.tpl
new file mode 100644
index 00000000..e5b89719
--- /dev/null
+++ b/vsphere/files/cloud-config-server.tpl
@@ -0,0 +1,17 @@
+#cloud-config
+hostname: ${hostname}
+ssh_authorized_keys:
+  - ${authorized_key}
+rancher:
+  docker:
+    engine: docker-${docker_version}
+  services:
+    rancher-qs-server:
+      image: rancher/rancher:${rancher_version}
+      restart: unless-stopped
+      privileged: false
+      ports:
+        - 80:80
+        - 443:443
+      volumes:
+        - /root/rancher:/var/lib/rancher
diff --git a/vsphere/files/server_bootstrap.sh b/vsphere/files/server_bootstrap.sh
new file mode 100644
index 00000000..ed3bb7d0
--- /dev/null
+++ b/vsphere/files/server_bootstrap.sh
@@ -0,0 +1,64 @@
+#!/bin/sh
+set -x
+
+curlimage=appropriate/curl
+jqimage=stedolan/jq
+cluster_name=qs-cluster
+
+if [ $# -lt 1 ]; then
+  echo "required argument missing"
+  exit 1
+fi
+
+password=$1
+
+for image in $curlimage $jqimage; do
+  docker pull $image
+  sleep 2
+done
+
+while true; do
+  docker run --rm --net=host $curlimage -sLk https://127.0.0.1/ping && break
+  sleep 5
+done
+
+# Login
+while true; do
+
+  LOGINRESPONSE=$(docker run \
+    --rm \
+    --net=host \
+    $curlimage \
+    -s "https://127.0.0.1/v3-public/localProviders/local?action=login" -H 'content-type: application/json' --data-binary '{"username":"admin","password":"admin"}' --insecure)
+  LOGINTOKEN=$(echo $LOGINRESPONSE | docker run --rm -i $jqimage -r .token)
+  echo "Login Token is $LOGINTOKEN"
+  if [ "$LOGINTOKEN" != "null" ]; then
+    break
+  else
+    sleep 5
+  fi
+done
+
+
+# Change password
+payload="{\"currentPassword\":\"admin\",\"newPassword\":\"$password\"}"
+docker run --rm --net=host $curlimage -s "https://127.0.0.1/v3/users?action=changepassword" -H "content-type: application/json" -H "Authorization: Bearer $LOGINTOKEN" --data-binary "$payload" --insecure
+
+# Create API key
+APIRESPONSE=$(docker run --rm --net=host $curlimage -s 'https://127.0.0.1/v3/token' -H 'content-type: application/json' -H "Authorization: Bearer $LOGINTOKEN" --data-binary '{"type":"token","description":"automation"}' --insecure)
+
+# Extract and store token
+APITOKEN=`echo $APIRESPONSE | docker run --rm -i $jqimage -r .token`
+
+# Configure server-url
+RANCHER_SERVER="https://$(ifconfig eth0 | grep 'inet addr' | cut -d: -f2 | awk '{print $1}')"
+docker run --rm --net=host $curlimage -s 'https://127.0.0.1/v3/settings/server-url' -H 'content-type: application/json' -H "Authorization: Bearer $APITOKEN" -X PUT --data-binary '{"name":"server-url","value":"'$RANCHER_SERVER'"}' --insecure
+
+# Create cluster
+CLUSTERRESPONSE=$(docker run --rm --net=host $curlimage -s 'https://127.0.0.1/v3/cluster' -H 'content-type: application/json' -H "Authorization: Bearer $APITOKEN" --data-binary '{"type":"cluster","rancherKubernetesEngineConfig":{"addonJobTimeout":30,"ignoreDockerVersion":true,"sshAgentAuth":false,"type":"rancherKubernetesEngineConfig","authentication":{"type":"authnConfig","strategy":"x509"},"network":{"type":"networkConfig","plugin":"canal"},"ingress":{"type":"ingressConfig","provider":"nginx"},"services":{"type":"rkeConfigServices","kubeApi":{"podSecurityPolicy":false,"type":"kubeAPIService"},"etcd":{"snapshot":false,"type":"etcdService","extraArgs":{"heartbeat-interval":500,"election-timeout":5000}}}},"name":"'$cluster_name'"}' --insecure)
+
+# Extract clusterid to use for generating the docker run command
+CLUSTERID=`echo $CLUSTERRESPONSE | docker run --rm -i $jqimage -r .id`
+
+# Generate registration token
+docker run --rm --net=host $curlimage -s 'https://127.0.0.1/v3/clusterregistrationtoken' -H 'content-type: application/json' -H "Authorization: Bearer $APITOKEN" --data-binary '{"type":"clusterRegistrationToken","clusterId":"'$CLUSTERID'"}' --insecure
diff --git a/vsphere/locals.tf b/vsphere/locals.tf
new file mode 100644
index 00000000..f875e90d
--- /dev/null
+++ b/vsphere/locals.tf
@@ -0,0 +1,7 @@
+locals {
+  server_name_prefix        = "rancher-qs-server"
+  cluster_nodes_name_prefix = "rancher-qs-node"
+  # Some trickery to work around a limitation of the Terraform ternary operator:
+  # it allows the user to specify either the name of a cluster or an ESXi host
+  pool_id = "${var.vsphere_cluster != "" ? join("", data.vsphere_compute_cluster.cluster.*.resource_pool_id) : join("", data.vsphere_host.host.*.resource_pool_id)}"
+}
diff --git a/vsphere/outputs.tf b/vsphere/outputs.tf
new file mode 100644
index 00000000..7414d54e
--- /dev/null
+++ b/vsphere/outputs.tf
@@ -0,0 +1,3 @@
+output "rancher-url" {
+  value = ["https://${vsphere_virtual_machine.server.default_ip_address}"]
+}
\ No newline at end of file
diff --git a/vsphere/providers.tf b/vsphere/providers.tf
new file mode 100755
index 00000000..f0ff7034
--- /dev/null
+++ b/vsphere/providers.tf
@@ -0,0 +1,8 @@
+# Configure the vSphere Provider
+provider "vsphere" {
+  version              = "~> 1.8"
+  user                 = "${var.vcenter_user}"
+  password             = "${var.vcenter_password}"
+  vsphere_server       = "${var.vcenter_server}"
+  allow_unverified_ssl = "${var.vcenter_insecure}"
+}
diff --git a/vsphere/provisioner.tf b/vsphere/provisioner.tf
new file mode 100644
index 00000000..d66fe02a
--- /dev/null
+++ b/vsphere/provisioner.tf
@@ -0,0 +1,5 @@
+# Generate a short-lived SSH key for the provisioner to access the nodes.
+# The key is replaced by the user-provided authorized_ssh_key during provisioning.
+resource "tls_private_key" "provisioning_key" {
+  algorithm = "RSA"
+}
diff --git a/vsphere/terraform.tfvars.example b/vsphere/terraform.tfvars.example
new file mode 100755
index 00000000..6b76331c
--- /dev/null
+++ b/vsphere/terraform.tfvars.example
@@ -0,0 +1,23 @@
+vcenter_user     = "username"
+vcenter_password = "password"
+vcenter_server   = "vcenter.acme.com"
+vcenter_insecure = false
+
+rancher_admin_password    = "changeme"
+rancher_version           = "v2.0.8"
+rancher_num_cluster_nodes = 3
+
+node_num_cpus       = 2
+node_memory_gb      = 4
+node_docker_version = "17.03.1-ce"
+
+vsphere_datacenter = "HE-FMT"
+vsphere_host       = "192.168.100.222"
+vsphere_datastore  = "datastore-192-168-100-222"
+vsphere_network    = "VM Network"
+vsphere_template   = "vm-templates/rancheros-v1.4.0-golden"
+vsphere_folder     = "rancher-qs"
+
+authorized_ssh_key = "ssh-rsa abcdef user@host"
+
+
diff --git a/vsphere/variables.tf b/vsphere/variables.tf
new file mode 100755
index 00000000..3d2601ea
--- /dev/null
+++ b/vsphere/variables.tf
@@ -0,0 +1,111 @@
+#-----------------------------------------#
+# vCenter Connection
+#-----------------------------------------#
+
+# vCenter username
+variable "vcenter_user" {
+  type = "string"
+}
+
+# vCenter password
+variable "vcenter_password" {
+  type = "string"
+}
+
+# vCenter server FQDN or IP address
+variable "vcenter_server" {
+  type = "string"
+}
+
+# Skip certificate verification
+variable "vcenter_insecure" {
+  default = false
+}
+
+#-----------------------------------------#
+# Rancher Configuration
+#-----------------------------------------#
+
+# Rancher admin password to use
+variable "rancher_admin_password" {
+  type = "string"
+}
+
+# Rancher image tag/version to use
+variable "rancher_version" {
+  default = "latest"
+}
+
+# Number of nodes to create for the first cluster
+variable "rancher_num_cluster_nodes" {
+  default = 3
+}
+
+#-----------------------------------------#
+# Cluster Node Configuration
+#-----------------------------------------#
+
+# Number of vCPUs to assign to cluster nodes
+variable "node_num_cpus" {
+  default = 2
+}
+
+# Memory size in GB to assign to cluster nodes
+variable "node_memory_gb" {
+  default = 2
+}
+
+# Docker version to use for nodes
+variable "node_docker_version" {
+  default = "17.03.1-ce"
+}
+
+#-----------------------------------------#
+# vSphere Resource Configuration
+#-----------------------------------------#
+
+# vSphere datacenter to use
+variable "vsphere_datacenter" {
+  type = "string"
+}
+
+# vSphere cluster to use (required unless vsphere_host is specified)
+variable "vsphere_cluster" {
+  type    = "string"
+  default = ""
+}
+
+# vSphere host to use (required unless vsphere_cluster is specified)
+variable "vsphere_host" {
+  type    = "string"
+  default = ""
+}
+
+# Name/path of the datastore to use
+variable "vsphere_datastore" {
+  type = "string"
+}
+
+# VM network to attach the VMs to
+variable "vsphere_network" {
+  type = "string"
+}
+
+# Name/path of the RancherOS template to clone VMs from
+variable "vsphere_template" {
+  type = "string"
+}
+
+# Name/path of the inventory folder in which to store the VMs
+variable "vsphere_folder" {
+  default = "rancher-quickstart"
+}
+
+#-----------------------------------------#
+# Management
+#-----------------------------------------#
+
+# SSH public key to authorize on all VMs
+variable "authorized_ssh_key" {
+  type = "string"
+}
diff --git a/vsphere/virtual_machines.tf b/vsphere/virtual_machines.tf
new file mode 100755
index 00000000..62c20299
--- /dev/null
+++ b/vsphere/virtual_machines.tf
@@ -0,0 +1,137 @@
+# Creates the VM inventory folder
+resource "vsphere_folder" "folder" {
+  path          = "${var.vsphere_folder}"
+  type          = "vm"
+  datacenter_id = "${data.vsphere_datacenter.dc.id}"
+}
+
+# Creates and provisions a VM for the server
+resource "vsphere_virtual_machine" "server" {
+  name             = "${local.server_name_prefix}"
+  resource_pool_id = "${local.pool_id}"
+  datastore_id     = "${data.vsphere_datastore.datastore.id}"
+  folder           = "${var.vsphere_folder}"
+
+  num_cpus  = 2
+  memory    = 4096
+  guest_id  = "${data.vsphere_virtual_machine.template.guest_id}"
+  scsi_type = "${data.vsphere_virtual_machine.template.scsi_type}"
+
+  network_interface {
+    network_id   = "${data.vsphere_network.network.id}"
+    adapter_type = "${data.vsphere_virtual_machine.template.network_interface_types[0]}"
+  }
+
+  disk {
+    label            = "disk0"
+    size             = "${data.vsphere_virtual_machine.template.disks.0.size}"
+    eagerly_scrub    = "${data.vsphere_virtual_machine.template.disks.0.eagerly_scrub}"
+    thin_provisioned = "${data.vsphere_virtual_machine.template.disks.0.thin_provisioned}"
+  }
+
+  clone {
+    template_uuid = "${data.vsphere_virtual_machine.template.id}"
+    linked_clone  = false
+  }
+
+  vapp {
+    properties {
+      "guestinfo.cloud-init.config.data"   = "${base64encode("${data.template_file.server.rendered}")}"
+      "guestinfo.cloud-init.data.encoding" = "base64"
+    }
+  }
+
+  # Copy script to bootstrap Rancher server
+  provisioner "file" {
+    source      = "${path.module}/files/server_bootstrap.sh"
+    destination = "/tmp/bootstrap.sh"
+    connection {
+      type        = "ssh"
+      user        = "rancher"
+      private_key = "${tls_private_key.provisioning_key.private_key_pem}"
+    }
+  }
+
+  # Execute bootstrap script and provision user SSH key
+  provisioner "remote-exec" {
+    inline = [
+      "chmod +x /tmp/bootstrap.sh",
+      "/tmp/bootstrap.sh \"${var.rancher_admin_password}\"",
+      "sudo echo \"${var.authorized_ssh_key}\" > /home/rancher/.ssh/authorized_keys",
+      "sudo ros config set ssh_authorized_keys \"['${var.authorized_ssh_key}']\"",
+    ]
+
+    connection {
+      type        = "ssh"
+      user        = "rancher"
+      private_key = "${tls_private_key.provisioning_key.private_key_pem}"
+    }
+  }
+}
+
+# Creates and provisions VMs for the cluster
+resource "vsphere_virtual_machine" "nodes" {
+  count            = "${var.rancher_num_cluster_nodes}"
+  name             = "${local.cluster_nodes_name_prefix}-${count.index + 1}"
+  resource_pool_id = "${local.pool_id}"
+  datastore_id     = "${data.vsphere_datastore.datastore.id}"
+  folder           = "${var.vsphere_folder}"
+
+  # Node sizing comes from the node_* variables; node_memory_gb is given in GB,
+  # while the vSphere provider expects the memory size in MB.
+  num_cpus  = "${var.node_num_cpus}"
+  memory    = "${var.node_memory_gb * 1024}"
+  guest_id  = "${data.vsphere_virtual_machine.template.guest_id}"
+  scsi_type = "${data.vsphere_virtual_machine.template.scsi_type}"
+
+  network_interface {
+    network_id   = "${data.vsphere_network.network.id}"
+    adapter_type = "${data.vsphere_virtual_machine.template.network_interface_types[0]}"
+  }
+
+  disk {
+    label            = "disk0"
+    size             = "${data.vsphere_virtual_machine.template.disks.0.size}"
+    eagerly_scrub    = "${data.vsphere_virtual_machine.template.disks.0.eagerly_scrub}"
+    thin_provisioned = "${data.vsphere_virtual_machine.template.disks.0.thin_provisioned}"
+  }
+
+  clone {
+    template_uuid = "${data.vsphere_virtual_machine.template.id}"
+    linked_clone  = false
+  }
+
+  vapp {
+    properties {
+      "guestinfo.cloud-init.config.data"   = "${base64encode("${data.template_file.cluster.*.rendered[count.index]}")}"
+      "guestinfo.cloud-init.data.encoding" = "base64"
+    }
+  }
+
+  # Copy script to bootstrap Rancher agent
+  provisioner "file" {
+    source      = "${path.module}/files/agent_bootstrap.sh"
+    destination = "/tmp/bootstrap.sh"
+    connection {
+      type        = "ssh"
+      user        = "rancher"
+      private_key = "${tls_private_key.provisioning_key.private_key_pem}"
+    }
+  }
+
+  # Execute bootstrap script and provision user SSH key
+  provisioner "remote-exec" {
+    inline = [
+      "chmod +x /tmp/bootstrap.sh",
+      "/tmp/bootstrap.sh \"${vsphere_virtual_machine.server.default_ip_address}\" \"${var.rancher_admin_password}\"",
+      "sudo echo \"${var.authorized_ssh_key}\" > /home/rancher/.ssh/authorized_keys",
+      "sudo ros config set ssh_authorized_keys \"['${var.authorized_ssh_key}']\"",
+    ]
+
+    connection {
+      type        = "ssh"
+      user        = "rancher"
+      private_key = "${tls_private_key.provisioning_key.private_key_pem}"
+    }
+  }
+}
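Besides the pinned `vsphere` provider in `providers.tf`, the configuration also pulls in the `template` and `tls` providers through the `template_file` data sources and the `tls_private_key` resource. Pinning those as well keeps `terraform init` reproducible; a possible sketch, where the version constraints are assumptions rather than values taken from this change:

```hcl
# Hypothetical version pins for the providers used implicitly by this module;
# adjust the constraints to whatever has been tested in your environment.
provider "template" {
  version = "~> 1.0"
}

provider "tls" {
  version = "~> 1.2"
}
```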