diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..b9bfcf80
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,6 @@
+.idea/
+.DS_Store
+*.tfstate
+*.tfstate.backup
+.terraform/
+*.tfvars
diff --git a/README.md b/README.md
index 1011c0c5..d11db69c 100644
--- a/README.md
+++ b/README.md
@@ -35,6 +35,64 @@
 To remove the VM's that have been deployed run `terraform destroy --force`
 
 **Please be aware that you will be responsible for the usage charges with Digital Ocean**
 
+## vSphere quick start
+
+The vsphere folder contains Terraform code to stand up a single Rancher server instance with a 3-node cluster attached to it.
+
+This Terraform setup will:
+
+- Create a VM in vSphere running the `rancher/rancher` version specified in `rancher_version`
+- Create a custom cluster called `rancher_cluster_name`
+- Start `rancher_num_cluster_nodes` VMs in vSphere and add them to the custom cluster with all roles
+
+### Prerequisites
+
+#### VMware vSphere
+
+The Terraform code was tested on vSphere 6.7 but should work with vSphere 6.0 and later.
+
+#### VM Network
+
+There must be a VM network available in vSphere that provides:
+- IP address assignment via DHCP
+- Internet access to the public Docker registry (aka Docker Hub)
+
+#### Ubuntu Cloud Image VM template
+
+Before running the Terraform code you must create a VM template in vSphere based on the official Ubuntu 16.04 LTS cloud image, so that the VMs can be correctly bootstrapped using a Cloud-Init userdata script.
+
+1. Log in to vCenter using the vSphere web console.
+2. Right-click on the inventory list and select "Deploy OVF template...".
+3. Specify the URL to the Ubuntu 16.04 LTS cloud image virtual appliance and hit *Next*: [ubuntu-16.04-server-cloudimg-amd64.ova](https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64.ova)
+4. Select an inventory folder to save the VM template in.
+5. Select the cluster, host or resource pool in which to temporarily create the VM before converting it to a template.
+6. Select a (preferably shared) datastore for the disk image.
+7. Select the network to use for the template.
+8. Skip the "Customize template" step.
+9. Navigate to the newly created VM, click "Edit Settings..." in the context menu and increase the size of "Hard disk 1" to 25 GB or larger.
+10. Finally, convert the VM to a template by selecting "Convert to template..." in the context menu.
+
+### How to use
+
+1. Clone this repository and go into the `vsphere` subfolder.
+2. Copy the file `terraform.tfvars.example` to `terraform.tfvars` and modify the latter to match your environment (see inline comments in `variables.tf`).
+3. At a minimum, specify the name/path of the Ubuntu template (`vsphere_template`) as well as the following configuration variables (a minimal example is shown below):
+   - `vcenter_user`
+   - `vcenter_password`
+   - `vcenter_server`
+   - `vsphere_datacenter`
+   - One of `vsphere_resource_pool` or `vsphere_cluster`
+   - `vsphere_datastore`
+   - `vsphere_network`
+4. Run `terraform init`
+5. Run `terraform apply`
+
+When provisioning has finished, you will be given the URL to connect to the Rancher server. Log in with username `admin` and the password specified in the `rancher_admin_password` config variable.
+
+### How to Remove
+
+To remove the VMs that have been deployed, run `terraform destroy --force`.
+
 ## Vagrant quick start
 
 The vagrant folder contains a vagrant code to stand up a single Rancher server instance with a 3 node cluster attached to it.
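For step 3 of "How to use" above, a minimal `terraform.tfvars` might look like the following sketch. Every value is a placeholder for your own environment (you can set `vsphere_cluster` instead of `vsphere_resource_pool`); the full set of options is documented in `variables.tf` and `terraform.tfvars.example` below:

```hcl
# Minimal terraform.tfvars sketch -- all values are placeholders.
vcenter_user     = "administrator@vsphere.local"
vcenter_password = "secret"
vcenter_server   = "vcenter.example.com"

rancher_admin_password = "changeme"

vsphere_datacenter    = "dc-1"
vsphere_resource_pool = "pool-1"      # or set vsphere_cluster instead
vsphere_datastore     = "datastore1"
vsphere_network       = "VM Network"
vsphere_template      = "vm-templates/ubuntu-16.04-server-cloudimg-amd64.ova"

authorized_ssh_key = "ssh-rsa AAAA... user@host"
```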
diff --git a/vsphere/data_sources.tf b/vsphere/data_sources.tf
new file mode 100755
index 00000000..b871e22b
--- /dev/null
+++ b/vsphere/data_sources.tf
@@ -0,0 +1,39 @@
+data "vsphere_datacenter" "dc" {
+  name = "${var.vsphere_datacenter}"
+}
+
+# Use count with a ternary operator for conditional fetching of this data source
+data "vsphere_compute_cluster" "cluster" {
+  count         = "${var.vsphere_cluster != "" ? 1 : 0}"
+  name          = "${var.vsphere_cluster}"
+  datacenter_id = "${data.vsphere_datacenter.dc.id}"
+}
+
+# Use count with a ternary operator for conditional fetching of this data source
+data "vsphere_resource_pool" "pool" {
+  count         = "${var.vsphere_resource_pool != "" ? 1 : 0}"
+  name          = "${var.vsphere_resource_pool}"
+  datacenter_id = "${data.vsphere_datacenter.dc.id}"
+}
+
+# Use count with a ternary operator for conditional fetching of this data source
+# data "vsphere_host" "host" {
+#   count         = "${var.vsphere_host != "" ? 1 : 0}"
+#   name          = "${var.vsphere_host}"
+#   datacenter_id = "${data.vsphere_datacenter.dc.id}"
+# }
+
+data "vsphere_datastore" "datastore" {
+  name          = "${var.vsphere_datastore}"
+  datacenter_id = "${data.vsphere_datacenter.dc.id}"
+}
+
+data "vsphere_network" "network" {
+  name          = "${var.vsphere_network}"
+  datacenter_id = "${data.vsphere_datacenter.dc.id}"
+}
+
+data "vsphere_virtual_machine" "template" {
+  name          = "${var.vsphere_template}"
+  datacenter_id = "${data.vsphere_datacenter.dc.id}"
+}
diff --git a/vsphere/files/userdata_agent b/vsphere/files/userdata_agent
new file mode 100644
index 00000000..64942210
--- /dev/null
+++ b/vsphere/files/userdata_agent
@@ -0,0 +1,101 @@
+#!/bin/bash -x
+export curlimage=appropriate/curl
+export jqimage=stedolan/jq
+export rancher_server_ip='${server_address}'
+
+if [ `command -v curl` ]; then
+  curl -sL https://releases.rancher.com/install-docker/${docker_version}.sh | sh
+elif [ `command -v wget` ]; then
+  wget -qO- https://releases.rancher.com/install-docker/${docker_version}.sh | sh
+fi
+
+for image in $curlimage $jqimage; do
+  docker pull $image
+done
+
+# Wait until the Rancher server responds
+while true; do
+  docker run --rm $curlimage -sLk https://$rancher_server_ip/ping && break
+  sleep 5
+done
+
+# Login
+while true; do
+  LOGINRESPONSE=$(docker run \
+    --rm \
+    $curlimage \
+    -s "https://$rancher_server_ip/v3-public/localProviders/local?action=login" -H 'content-type: application/json' --data-binary '{"username":"admin","password":"${admin_password}"}' --insecure)
+  LOGINTOKEN=$(echo $LOGINRESPONSE | docker run --rm -i $jqimage -r .token)
+
+  if [ "$LOGINTOKEN" != "null" ]; then
+    break
+  else
+    sleep 5
+  fi
+done
+
+# Get the agent image from the Rancher server
+while true; do
+  AGENTIMAGE=$(docker run \
+    --rm \
+    $curlimage \
+    -sLk \
+    -H "Authorization: Bearer $LOGINTOKEN" \
+    "https://$rancher_server_ip/v3/settings/agent-image" | docker run --rm -i $jqimage -r '.value')
+
+  if [ -n "$AGENTIMAGE" ]; then
+    break
+  else
+    sleep 5
+  fi
+done
+
+until docker inspect $AGENTIMAGE > /dev/null 2>&1; do
+  docker pull $AGENTIMAGE
+  sleep 2
+done
+
+# Wait until the cluster has been created
+while true; do
+  CLUSTERID=$(docker run \
+    --rm \
+    $curlimage \
+    -sLk \
+    -H "Authorization: Bearer $LOGINTOKEN" \
+    "https://$rancher_server_ip/v3/clusters?name=${cluster_name}" | docker run --rm -i $jqimage -r '.data[].id')
+
+  if [ -n "$CLUSTERID" ]; then
+    break
+  else
+    sleep 5
+  fi
+done
+
+# Derive the role flag from the hostname suffix
+ROLEFLAG=`hostname | awk -F'-' '{ print $NF }'`
+if [[ "$ROLEFLAG" == "all" ]]; then
+  ROLEFLAG="all-roles"
+fi
+
+# Get the node registration command for the cluster
+while true; do
+  AGENTCMD=$(docker run \
+    --rm \
+    $curlimage \
+    -sLk \
+    -H "Authorization: Bearer $LOGINTOKEN" \
+    "https://$rancher_server_ip/v3/clusterregistrationtoken?clusterId=$CLUSTERID" | docker run --rm -i $jqimage -r '.data[].nodeCommand' | head -1)
+
+  if [ -n "$AGENTCMD" ]; then
+    break
+  else
+    sleep 5
+  fi
+done
+
+# Combine command and role flag
+COMPLETECMD="$AGENTCMD --$ROLEFLAG"
+
+# Run the registration command
+$COMPLETECMD
diff --git a/vsphere/files/userdata_server b/vsphere/files/userdata_server
new file mode 100644
index 00000000..17f9b5aa
--- /dev/null
+++ b/vsphere/files/userdata_server
@@ -0,0 +1,60 @@
+#!/bin/bash -x
+export curlimage=appropriate/curl
+export jqimage=stedolan/jq
+
+if [ `command -v curl` ]; then
+  curl -sL https://releases.rancher.com/install-docker/${docker_version}.sh | sh
+elif [ `command -v wget` ]; then
+  wget -qO- https://releases.rancher.com/install-docker/${docker_version}.sh | sh
+fi
+
+for image in $curlimage $jqimage "rancher/rancher:${rancher_version}"; do
+  docker pull $image
+done
+
+docker run -d --restart=unless-stopped -p 80:80 -p 443:443 -v /root/rancher:/var/lib/rancher rancher/rancher:${rancher_version}
+
+# Wait until the Rancher server responds
+while true; do
+  docker run --rm --net=host $curlimage -sLk https://127.0.0.1/ping && break
+  sleep 5
+done
+
+# Login with the default credentials
+while true; do
+  LOGINRESPONSE=$(docker run \
+    --rm \
+    --net=host \
+    $curlimage \
+    -s "https://127.0.0.1/v3-public/localProviders/local?action=login" -H 'content-type: application/json' --data-binary '{"username":"admin","password":"admin"}' --insecure)
+  LOGINTOKEN=$(echo $LOGINRESPONSE | docker run --rm -i $jqimage -r .token)
+  echo "Login Token is $LOGINTOKEN"
+  if [ "$LOGINTOKEN" != "null" ]; then
+    break
+  else
+    sleep 5
+  fi
+done
+
+# Change the default admin password
+docker run --rm --net=host $curlimage -s 'https://127.0.0.1/v3/users?action=changepassword' -H 'content-type: application/json' -H "Authorization: Bearer $LOGINTOKEN" --data-binary '{"currentPassword":"admin","newPassword":"${admin_password}"}' --insecure
+
+# Create API key
+APIRESPONSE=$(docker run --rm --net=host $curlimage -s 'https://127.0.0.1/v3/token' -H 'content-type: application/json' -H "Authorization: Bearer $LOGINTOKEN" --data-binary '{"type":"token","description":"automation"}' --insecure)
+
+# Extract and store token
+APITOKEN=`echo $APIRESPONSE | docker run --rm -i $jqimage -r .token`
+
+# Configure server-url
+RANCHER_SERVER="https://$(ip route get 8.8.8.8 | awk '/8.8.8.8/ {print $NF}')"
+docker run --rm --net=host $curlimage -s 'https://127.0.0.1/v3/settings/server-url' -H 'content-type: application/json' -H "Authorization: Bearer $APITOKEN" -X PUT --data-binary '{"name":"server-url","value":"'$RANCHER_SERVER'"}' --insecure
+
+# Create cluster
+CLUSTERRESPONSE=$(docker run --rm --net=host $curlimage -s 'https://127.0.0.1/v3/cluster' -H 'content-type: application/json' -H "Authorization: Bearer $APITOKEN" --data-binary '{"type":"cluster","rancherKubernetesEngineConfig":{"addonJobTimeout":30,"ignoreDockerVersion":true,"sshAgentAuth":false,"type":"rancherKubernetesEngineConfig","authentication":{"type":"authnConfig","strategy":"x509"},"network":{"type":"networkConfig","plugin":"canal"},"ingress":{"type":"ingressConfig","provider":"nginx"},"services":{"type":"rkeConfigServices","kubeApi":{"podSecurityPolicy":false,"type":"kubeAPIService"},"etcd":{"snapshot":false,"type":"etcdService","extraArgs":{"heartbeat-interval":500,"election-timeout":5000}}}},"name":"${cluster_name}"}' --insecure)
+
+# Extract the cluster id to use for generating the registration token
+CLUSTERID=`echo $CLUSTERRESPONSE | docker run --rm -i $jqimage -r .id`
+
+# Generate the cluster registration token
+docker run --rm --net=host $curlimage -s 'https://127.0.0.1/v3/clusterregistrationtoken' -H 'content-type: application/json' -H "Authorization: Bearer $APITOKEN" --data-binary '{"type":"clusterRegistrationToken","clusterId":"'$CLUSTERID'"}' --insecure
\ No newline at end of file
diff --git a/vsphere/locals.tf b/vsphere/locals.tf
new file mode 100644
index 00000000..7523ac67
--- /dev/null
+++ b/vsphere/locals.tf
@@ -0,0 +1,9 @@
+locals {
+  server_name_prefix        = "rancher-qs-server"
+  cluster_nodes_name_prefix = "rancher-qs-node"
+
+  # Support either a cluster or a host name
+  # pool_id = "${var.vsphere_cluster != "" ? join("", data.vsphere_compute_cluster.cluster.*.resource_pool_id) : join("", data.vsphere_host.host.*.resource_pool_id)}"
+  # Support either a cluster or a resource pool name
+  pool_id = "${var.vsphere_cluster != "" ? join("", data.vsphere_compute_cluster.cluster.*.resource_pool_id) : join("", data.vsphere_resource_pool.pool.*.id)}"
+}
\ No newline at end of file
diff --git a/vsphere/output.tf b/vsphere/output.tf
new file mode 100644
index 00000000..7414d54e
--- /dev/null
+++ b/vsphere/output.tf
@@ -0,0 +1,3 @@
+output "rancher-url" {
+  value = ["https://${vsphere_virtual_machine.server.default_ip_address}"]
+}
\ No newline at end of file
diff --git a/vsphere/provider.tf b/vsphere/provider.tf
new file mode 100755
index 00000000..f0ff7034
--- /dev/null
+++ b/vsphere/provider.tf
@@ -0,0 +1,8 @@
+# Configure the vSphere Provider
+provider "vsphere" {
+  version              = "~> 1.8"
+  user                 = "${var.vcenter_user}"
+  password             = "${var.vcenter_password}"
+  vsphere_server       = "${var.vcenter_server}"
+  allow_unverified_ssl = "${var.vcenter_insecure}"
+}
diff --git a/vsphere/terraform.tfvars.example b/vsphere/terraform.tfvars.example
new file mode 100755
index 00000000..342ababb
--- /dev/null
+++ b/vsphere/terraform.tfvars.example
@@ -0,0 +1,22 @@
+vcenter_user = "username"
+vcenter_password = "password"
+vcenter_server = "vcenter.acme.com"
+vcenter_insecure = false
+
+rancher_admin_password = "changeme"
+rancher_version = "v2.1.3"
+rancher_num_cluster_nodes = 3
+rancher_cluster_name = "quickstart"
+
+node_num_cpus = "2"
+node_memory_mb = "4096"
+docker_version = "17.03"
+
+vsphere_datacenter = "HE-FMT"
+vsphere_resource_pool = "pool-foo"
+vsphere_datastore = "datastore-shared"
+vsphere_network = "VM Network"
+vsphere_template = "vm-templates/ubuntu-16.04-server-cloudimg-amd64.ova"
+vsphere_folder = "rancher-qs"
+
+authorized_ssh_key = "ssh-rsa MIIEogIBAA... user@host"
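The `count` plus ternary trick used in `data_sources.tf` and `locals.tf` above is Terraform 0.11's idiom for optional data sources: `count = 0` skips the lookup entirely, and `join("", splat)` collapses the resulting zero-or-one-element list back into a plain string. A standalone sketch of the pattern (the `optional_pool_name` variable and `maybe_pool_id` local are illustrative, not part of this quickstart):

```hcl
# Illustrative only -- shows the conditional data source pattern.
variable "optional_pool_name" {
  default = ""
}

# Fetched only when optional_pool_name is set; with count = 0 no
# vSphere API call is made at all.
data "vsphere_resource_pool" "maybe" {
  count         = "${var.optional_pool_name != "" ? 1 : 0}"
  name          = "${var.optional_pool_name}"
  datacenter_id = "${data.vsphere_datacenter.dc.id}"
}

locals {
  # The splat list has zero or one element; join("") yields "" or the id.
  maybe_pool_id = "${join("", data.vsphere_resource_pool.maybe.*.id)}"
}
```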
user@host" diff --git a/vsphere/userdata.tf b/vsphere/userdata.tf new file mode 100755 index 00000000..dac4292b --- /dev/null +++ b/vsphere/userdata.tf @@ -0,0 +1,24 @@ +# Renders the userdata for the server +data "template_file" "userdata_server" { + template = "${file("${path.module}/files/userdata_server")}" + + vars { + admin_password = "${var.rancher_admin_password}" + cluster_name = "${var.rancher_cluster_name}" + docker_version = "${var.docker_version}" + rancher_version = "${var.rancher_version}" + } +} + +# Renders the userdata for the cluster nodes +data "template_file" "userdata_agent" { + template = "${file("${path.module}/files/userdata_agent")}" + + vars { + admin_password = "${var.rancher_admin_password}" + cluster_name = "${var.rancher_cluster_name}" + docker_version = "${var.docker_version}" + rancher_version = "${var.rancher_version}" + server_address = "${vsphere_virtual_machine.server.default_ip_address}" + } +} diff --git a/vsphere/variables.tf b/vsphere/variables.tf new file mode 100755 index 00000000..f3992e78 --- /dev/null +++ b/vsphere/variables.tf @@ -0,0 +1,123 @@ +#-----------------------------------------# +# vCenter Connection +#-----------------------------------------# + +# vCenter username +variable "vcenter_user" { + type = "string" +} + +# vCenter password +variable "vcenter_password" { + type = "string" +} + +# vCenter server FQDN or IP address +variable "vcenter_server" { + type = "string" +} + +// Skip certificate verification +variable "vcenter_insecure" { + default = false +} + +#-----------------------------------------# +# Rancher Configuration +#-----------------------------------------# + +# Rancher admin password to use +variable "rancher_admin_password" { + type = "string" +} + +# Rancher image tag/version to use +variable "rancher_version" { + default = "latest" +} + +# Number of nodes to create for the first cluster +variable "rancher_num_cluster_nodes" { + default = 3 +} + +# Name the first cluster +variable "rancher_cluster_name" { + default = "quickstart" +} + +#-----------------------------------------# +# Node Configuration +#-----------------------------------------# + +// Number of vCPUs to assign to worker nodes +variable "node_num_cpus" { + default = "2" +} + +// Memory size in MB to assign to worker nodes +variable "node_memory_mb" { + default = "4096" +} + +// Docker version to install on VMs +variable "docker_version" { + default = "17.03" +} + +#-----------------------------------------# +# vSphere Resource Configuration +#-----------------------------------------# + +# vSphere datacenter to use +variable "vsphere_datacenter" { + type = "string" +} + +# vSphere cluster to use (required unless vsphere_resource_pool is specified) +variable "vsphere_cluster" { + type = "string" + default = "" +} + +# vSphere resource pool to use (required unless vsphere_cluster is specified) +variable "vsphere_resource_pool" { + type = "string" + default = "" +} + +# vSphere host to use (required unless vsphere_cluster or vsphere_resource_pool are specified) +# variable "vsphere_host" { +# type = "string" +# default = "" +# } + +# Name/path of datastore to use +variable "vsphere_datastore" { + type = "string" +} + +# VM Network to attach the VMs +variable "vsphere_network" { + type = "string" +} + +# Name/path of RancherOS template to clone VMs from +variable "vsphere_template" { + type = "string" +} + +# Name/path of virtual machine folder to store the VMs in. +# The folder must not exist already. 
+variable "vsphere_folder" { + default= "rancher-quickstart" +} + +#-----------------------------------------# +# Management +#-----------------------------------------# + +# SSH public key to authorize for SSH access of the nodes +variable "authorized_ssh_key" { + type = "string" +} diff --git a/vsphere/virtual_machines.tf b/vsphere/virtual_machines.tf new file mode 100755 index 00000000..dcbd041c --- /dev/null +++ b/vsphere/virtual_machines.tf @@ -0,0 +1,100 @@ +# Creates the VM inventory folder +resource "vsphere_folder" "folder" { + path = "${var.vsphere_folder}" + type = "vm" + datacenter_id = "${data.vsphere_datacenter.dc.id}" +} + +# Generate UUIDs for all VMs to pass to Cloud-Init +resource "random_uuid" "instance-id" { + count = "${var.rancher_num_cluster_nodes +1}" +} + +# Creates and provisions a VM for the server +resource "vsphere_virtual_machine" "server" { + name = "${local.server_name_prefix}" + resource_pool_id = "${local.pool_id}" + datastore_id = "${data.vsphere_datastore.datastore.id}" + folder = "${vsphere_folder.folder.path}" + + num_cpus = 2 + memory = 4096 + guest_id = "${data.vsphere_virtual_machine.template.guest_id}" + scsi_type = "${data.vsphere_virtual_machine.template.scsi_type}" + + network_interface { + network_id = "${data.vsphere_network.network.id}" + adapter_type = "${data.vsphere_virtual_machine.template.network_interface_types[0]}" + } + + # Required for OVF ISO transport + cdrom { + client_device = true + } + + disk { + label = "disk0" + size = "${data.vsphere_virtual_machine.template.disks.0.size}" + eagerly_scrub = "${data.vsphere_virtual_machine.template.disks.0.eagerly_scrub}" + thin_provisioned = "${data.vsphere_virtual_machine.template.disks.0.thin_provisioned}" + } + + clone { + template_uuid = "${data.vsphere_virtual_machine.template.id}" + linked_clone = false + } + + vapp { + properties { + "instance-id" = "${random_uuid.instance-id.*.result[0]}" + "hostname" = "${local.server_name_prefix}" + "public-keys" = "${var.authorized_ssh_key}" + "user-data" = "${base64encode("${data.template_file.userdata_server.rendered}")}" + } + } +} + +# Creates and provisions VMs for the cluster +resource "vsphere_virtual_machine" "nodes" { + count = "${var.rancher_num_cluster_nodes}" + name = "${local.cluster_nodes_name_prefix}-${count.index + 1}-all" + resource_pool_id = "${local.pool_id}" + datastore_id = "${data.vsphere_datastore.datastore.id}" + folder = "${var.vsphere_folder}" + + num_cpus = "${var.node_num_cpus}" + memory = "${var.node_memory_mb}" + guest_id = "${data.vsphere_virtual_machine.template.guest_id}" + scsi_type = "${data.vsphere_virtual_machine.template.scsi_type}" + + network_interface { + network_id = "${data.vsphere_network.network.id}" + adapter_type = "${data.vsphere_virtual_machine.template.network_interface_types[0]}" + } + + # Required for OVF ISO transport + cdrom { + client_device = true + } + + disk { + label = "disk0" + size = "${data.vsphere_virtual_machine.template.disks.0.size}" + eagerly_scrub = "${data.vsphere_virtual_machine.template.disks.0.eagerly_scrub}" + thin_provisioned = "${data.vsphere_virtual_machine.template.disks.0.thin_provisioned}" + } + + clone { + template_uuid = "${data.vsphere_virtual_machine.template.id}" + linked_clone = false + } + + vapp { + properties { + "instance-id" = "${random_uuid.instance-id.*.result[count.index+1]}" + "hostname" = "${local.cluster_nodes_name_prefix}-${count.index + 1}-all" + "public-keys" = "${var.authorized_ssh_key}" + "user-data" = 
"${base64encode("${data.template_file.userdata_agent.rendered}")}" + } + } +}