25 changes: 25 additions & 0 deletions examples/sandbox-aws/application.yml
@@ -0,0 +1,25 @@
---

# Copyright 2021 Cloudera, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- name: Coda
  hosts: localhost
  connection: local
  gather_facts: no
  become: no
  tasks:
    - name: Deployment results
      debug:
        msg: Success!
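
The play above is the post-deployment "Coda" step: it runs against localhost, gathers no facts, and simply reports that the deployment finished. For illustration only (not part of this PR), a minimal sketch of invoking it on its own, assuming ansible-playbook is installed on the PATH and the command is run from the repository root:

# Sketch only: run the "Coda" play standalone.
# Assumes ansible-playbook is installed and the repo is checked out locally.
import subprocess

result = subprocess.run(
    ["ansible-playbook", "examples/sandbox-aws/application.yml"],
    check=False,  # inspect the return code ourselves instead of raising
)
print("Coda play exit code:", result.returncode)
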
196 changes: 196 additions & 0 deletions examples/sandbox-aws/definition.yml
@@ -0,0 +1,196 @@
---
###############################################################################
#Global variables - these can also be set in your profile
#These will take precedence over those set in your profile
###############################################################################
name_prefix: chpe #name prefix for everything, infra, plat, and run
infra_region: us-east-1 #CSP region for infra
infra_type: aws #CSP
cdp_profile: default #CDP Profile to use
aws_profile: default #AWS Profile to use
################################################################################
#Infra Level Configurations
################################################################################
infra:
  teardown:
    delete_data: yes #Whether to delete the cloud storage (S3) for the env
    delete_ssh_key: no #Whether to delete the env ssh key
  vpc:
    #An additional CIDR to allow into the env - your personal IP is added automatically
    extra_cidr: "74.217.76.0/24"
################################################################################
#Platform/Environment Level Configurations
################################################################################
env:
  #Override the environment name - the default is <name_prefix>-<CSP>-env
  name: "chpe-aw-env"
  datalake:
    #Override the datalake name - the default is <name_prefix>-<CSP>-dl
    name: "chpe-aw-dl"
  teardown:
    delete_admin_group: yes #Whether to delete the admin group during teardown
    delete_user_group: yes #Whether to delete the user group during teardown
    delete_cross_account: yes #Whether to delete the cross account role during teardown
    delete_credential: yes #Whether to delete the CDP credential during teardown
  cdp:
    #Admin Group Configuration - Name/Roles/Resource Roles
    admin_group:
      name: "chpe_demo-aws-prim"
      roles:
        - PowerUser
    #User Group Configuration - Name/Roles/Resource Roles
    user_group:
      name: "chpe_demos_workers_ww"
      roles:
        - PowerUser
      resource_roles:
        - EnvironmentUser
        - DEUser
        - DWUser
        - MLUser
  workload_analytics: yes #Whether to enable WXM during Environment creation
################################################################################
#Datahub Configurations
################################################################################
datahub:
  #List of Datahub Definitions to build
  definitions:
    #Example of Datahub Creation by Cluster Definition - use CDP defaults for
    #instance groups
    - definition: "Data Engineering for AWS"
      #Override the name of the datahub - default is <name_prefix>-dhub
      name: "def-only-dh"
    #Example of Datahub Creation by including a bespoke jinja template
    #Example can be found in cloudera.exe/roles/runtime/templates/datahub_streams_messaging_light.j2
    - include: "datahub_streams_messaging_light.j2"
    #Example of Datahub Creation by instance group specification
    - template: Flow Management Light Duty
      #Provide a custom suffix on the default naming convention - default suffix is dhub
      suffix: "in-depth"
      instance_groups:
        - instanceGroupName: management
        - instanceGroupName: nifi_scaling
          instanceGroupType: CORE
        #Instance Group Name
        - instanceGroupName: nifi
          #Instance Group Node Count
          nodeCount: 3
          #Instance Group Type - CORE/GATEWAY
          instanceGroupType: CORE
          #EBS Volume Configuration
          attachedVolumeConfiguration:
            #EBS Volume size per node
            - volumeSize: 500
              #EBS Volume Count per node
              volumeCount: 4
              #EBS Volume Type - st1/gp2/standard/ephemeral
              #ephemeral = ephemeral disk - not EBS
              #standard = magnetic
              #st1 = throughput optimized HDD
              #gp2 = general purpose SSD
              volumeType: st1
################################################################################
#Operational Database Configurations
################################################################################
opdb:
  definitions:
    - name: chpe-od #Override OpDB name - default is <name_prefix>-od
    - name: chpe-od-2 #Override OpDB name - default is <name_prefix>-od
################################################################################
#Machine Learning Configurations
################################################################################
ml:
  #List of ML Workspace Definitions to provision
  definitions:
    #Workspace Number 1
    - tls: yes #Enable TLS?
      #Enable Model Monitoring
      monitoring: yes
      #Enable a public-facing load balancer? Applicable for L0/L1 networks
      public_loadbalancer: yes
      #Override the default workspace name - default is <name_prefix>-wksp
      name: chpe-wksp
      #Whether to remove/delete workspace storage (EFS) upon termination
      storage: yes
      #CPU and GPU Instance Group Definitions
      instance_groups:
        #CPU Instance Group
        - name: cpu_settings
          #CPU Instance Type
          instanceType: "m5.4xlarge"
          #Default Instance Count
          instanceCount: 0
          #Root Volume Size for Each Instance
          rootVolume:
            size: 100
          #Autoscaling Properties (min/max/enabled)
          autoscaling:
            minInstances: 0
            maxInstances: 10
            enabled: true
        #GPU Instance Group
        - name: gpu_settings
          #Default Instance Count
          instanceCount: 0
          #GPU Instance Type
          instanceType: "p2.8xlarge"
          #Root Volume Size for Each Instance
          rootVolume:
            size: 100
          #Autoscaling Properties (min/max/enabled)
          autoscaling:
            minInstances: 0
            maxInstances: 2
            enabled: true
      #List of IP CIDRs to whitelist for public load balancers AND k8s management API
      ip_addresses:
        - "108.5.26.129/32"
        - "1.2.3.4/32"
    #Workspace Number 2
    - tls: yes
      monitoring: yes
      public_loadbalancer: yes
      #Override the default workspace name - default is <name_prefix>-wksp
      name: "my-cml-space"
      storage: yes
      instance_groups:
        - name: cpu_settings
          instanceType: "m5.4xlarge"
          instanceCount: 1
          rootVolume:
            size: 100
          autoscaling:
            minInstances: 1
            maxInstances: 5
            enabled: true
        - name: gpu_settings
          instanceCount: 0
          instanceType: "p2.8xlarge"
          rootVolume:
            size: 100
          autoscaling:
            minInstances: 0
            maxInstances: 2
            enabled: true
      ip_addresses:
        - "108.5.26.129/32"
        - "1.2.3.4/32"
################################################################################
#Data Warehouse Configurations
################################################################################
dw: #No current configs can be set here (yet)
################################################################################
#Data Flow Configurations
################################################################################
df:
  min_k8s_nodes: 3 #Minimum number of k8s nodes for autoscaling
  max_k8s_nodes: 5 #Maximum number of k8s nodes for autoscaling
  #Enable a public-facing load balancer? Applicable for L0/L1 networks
  public_loadbalancer: true
  #List of IP CIDRs to whitelist for public load balancers AND k8s management API
  ip_ranges:
    - "108.5.26.129/32"
    - "1.2.3.4/32"
  #Whether to remove/delete experience storage upon termination
  teardown:
    persist: false
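
Because the definition above drives the whole sandbox deployment, it can be worth sanity-checking locally that it parses and contains the expected top-level sections before launching a run. For illustration only (not part of this PR), a minimal sketch assuming PyYAML is installed and the script is run from the repository root; the key names are taken directly from the file above:

# Sketch only: check that definition.yml parses and has its top-level sections.
# Assumes PyYAML (pip install pyyaml) and execution from the repository root.
import yaml

with open("examples/sandbox-aws/definition.yml") as fh:
    definition = yaml.safe_load(fh)

expected = ["name_prefix", "infra_region", "infra_type", "cdp_profile",
            "aws_profile", "infra", "env", "datahub", "opdb", "ml", "dw", "df"]
missing = [key for key in expected if key not in definition]
if missing:
    raise SystemExit(f"definition.yml is missing sections: {missing}")
print("definition.yml OK:", ", ".join(sorted(definition)))
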