examples: remove azure examples

These have now moved into the AzureRM provider repository, linked from the
README here.
This commit is contained in:
Martin Atkins 2017-07-21 17:53:10 -07:00
parent 9be103b62d
commit e079e9d339
121 changed files with 1 addition and 6430 deletions

View File

@ -22,3 +22,4 @@ repositories contain documentation specific to their provider:
* [AliCloud Examples](https://github.com/terraform-providers/terraform-provider-alicloud/tree/master/examples)
* [Amazon Web Services Examples](https://github.com/terraform-providers/terraform-provider-aws/tree/master/examples)
* [Azure Examples](https://github.com/terraform-providers/terraform-provider-azurerm/tree/master/examples)

View File

@ -1,22 +0,0 @@
# Create 2 Virtual Machines under a Load Balancer and configure Load Balancing rules for the VMs
This Terraform template was based on [this](https://github.com/Azure/azure-quickstart-templates/tree/master/201-2-vms-loadbalancer-lbrules) Azure Quickstart Template. Changes to the ARM template that may have occurred since the creation of this example may not be reflected here.
This template allows you to create 2 Virtual Machines under a load balancer and configure a load balancing rule on Port 80. It also deploys a Storage Account, Virtual Network, Public IP address, Availability Set, and Network Interfaces.
## main.tf
The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
## outputs.tf
This data is output when `terraform apply` is called, and can be queried using the `terraform output` command.
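For example, after a successful `terraform apply` you could read a single value back (a minimal sketch; `vm_fqdn` is one of the outputs defined in this example's `outputs.tf`):

```sh
# Query one output value defined in outputs.tf
terraform output vm_fqdn

# Or list every output recorded in the state
terraform output
```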
## provider.tf
Azure requires that an application be added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this application and populate your `provider.tf` file.
## terraform.tfvars
If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
If you are committing this template to source control, please ensure that you add this file to your `.gitignore` file.
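As a sketch of that workflow, you could keep the secret values in a separate file and pass it explicitly (the file name `secrets.tfvars` is only an example; `admin_password` is one of this example's variables):

```sh
# Keep secrets out of terraform.tfvars and out of version control
cat > secrets.tfvars <<'EOF'
admin_password = "REPLACE-WITH-A-STRONG-PASSWORD"
EOF

# Load the file explicitly at plan time
terraform plan -var-file=secrets.tfvars
```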
## variables.tf
The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.

View File

@ -1,36 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
docker run --rm -it \
-e ARM_CLIENT_ID \
-e ARM_CLIENT_SECRET \
-e ARM_SUBSCRIPTION_ID \
-e ARM_TENANT_ID \
-v $(pwd):/data \
--entrypoint "/bin/sh" \
hashicorp/terraform:light \
-c "cd /data; \
/bin/terraform get; \
/bin/terraform validate; \
/bin/terraform plan -out=out.tfplan -var dns_name=$KEY -var hostname=$KEY -var lb_ip_dns_name=$KEY -var resource_group=$KEY -var admin_password=$PASSWORD; \
/bin/terraform apply out.tfplan"
# cleanup deployed azure resources via azure-cli
docker run --rm -it \
azuresdk/azure-cli-python:0.2.10 \
sh -c "az login --service-principal -u $ARM_CLIENT_ID -p $ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID > /dev/null; \
az network lb show -g $KEY -n rglb; \
az network lb rule list -g $KEY --lb-name rglb;"
# cleanup deployed azure resources via terraform
docker run --rm -it \
-e ARM_CLIENT_ID \
-e ARM_CLIENT_SECRET \
-e ARM_SUBSCRIPTION_ID \
-e ARM_TENANT_ID \
-v $(pwd):/data \
--workdir=/data \
--entrypoint "/bin/sh" \
hashicorp/terraform:light \
-c "/bin/terraform destroy -force -var dns_name=$KEY -var hostname=$KEY -var lb_ip_dns_name=$KEY -var resource_group=$KEY -var admin_password=$PASSWORD;"

View File

@ -1,15 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
if docker -v; then
# generate a unique string for CI deployment
export KEY=$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'a-z' | head -c 12)
export PASSWORD=$KEY$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'A-Z' | head -c 2)$(cat /dev/urandom | env LC_CTYPE=C tr -cd '0-9' | head -c 2)
/bin/sh ./deploy.ci.sh
else
echo "Docker is used to run terraform commands, please install before run: https://docs.docker.com/docker-for-mac/install/"
fi

View File

@ -1,145 +0,0 @@
# provider "azurerm" {
# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID"
# client_id = "REPLACE-WITH-YOUR-CLIENT-ID"
# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET"
# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID"
# }
resource "azurerm_resource_group" "rg" {
name = "${var.resource_group}"
location = "${var.location}"
}
resource "azurerm_storage_account" "stor" {
name = "${var.dns_name}stor"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
account_type = "${var.storage_account_type}"
}
resource "azurerm_availability_set" "avset" {
name = "${var.dns_name}avset"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
platform_fault_domain_count = 2
platform_update_domain_count = 2
managed = true
}
resource "azurerm_public_ip" "lbpip" {
name = "${var.rg_prefix}-ip"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
public_ip_address_allocation = "dynamic"
domain_name_label = "${var.lb_ip_dns_name}"
}
resource "azurerm_virtual_network" "vnet" {
name = "${var.virtual_network_name}"
location = "${var.location}"
address_space = ["${var.address_space}"]
resource_group_name = "${azurerm_resource_group.rg.name}"
}
resource "azurerm_subnet" "subnet" {
name = "${var.rg_prefix}subnet"
virtual_network_name = "${azurerm_virtual_network.vnet.name}"
resource_group_name = "${azurerm_resource_group.rg.name}"
address_prefix = "${var.subnet_prefix}"
}
resource "azurerm_lb" "lb" {
resource_group_name = "${azurerm_resource_group.rg.name}"
name = "${var.rg_prefix}lb"
location = "${var.location}"
frontend_ip_configuration {
name = "LoadBalancerFrontEnd"
public_ip_address_id = "${azurerm_public_ip.lbpip.id}"
}
}
resource "azurerm_lb_backend_address_pool" "backend_pool" {
resource_group_name = "${azurerm_resource_group.rg.name}"
loadbalancer_id = "${azurerm_lb.lb.id}"
name = "BackendPool1"
}
resource "azurerm_lb_nat_rule" "tcp" {
resource_group_name = "${azurerm_resource_group.rg.name}"
loadbalancer_id = "${azurerm_lb.lb.id}"
name = "RDP-VM-${count.index}"
protocol = "tcp"
frontend_port = "5000${count.index + 1}"
backend_port = 3389
frontend_ip_configuration_name = "LoadBalancerFrontEnd"
count = 2
}
resource "azurerm_lb_rule" "lb_rule" {
resource_group_name = "${azurerm_resource_group.rg.name}"
loadbalancer_id = "${azurerm_lb.lb.id}"
name = "LBRule"
protocol = "tcp"
frontend_port = 80
backend_port = 80
frontend_ip_configuration_name = "LoadBalancerFrontEnd"
enable_floating_ip = false
backend_address_pool_id = "${azurerm_lb_backend_address_pool.backend_pool.id}"
idle_timeout_in_minutes = 5
probe_id = "${azurerm_lb_probe.lb_probe.id}"
depends_on = ["azurerm_lb_probe.lb_probe"]
}
resource "azurerm_lb_probe" "lb_probe" {
resource_group_name = "${azurerm_resource_group.rg.name}"
loadbalancer_id = "${azurerm_lb.lb.id}"
name = "tcpProbe"
protocol = "tcp"
port = 80
interval_in_seconds = 5
number_of_probes = 2
}
resource "azurerm_network_interface" "nic" {
name = "nic${count.index}"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
count = 2
ip_configuration {
name = "ipconfig${count.index}"
subnet_id = "${azurerm_subnet.subnet.id}"
private_ip_address_allocation = "Dynamic"
load_balancer_backend_address_pools_ids = ["${azurerm_lb_backend_address_pool.backend_pool.id}"]
load_balancer_inbound_nat_rules_ids = ["${element(azurerm_lb_nat_rule.tcp.*.id, count.index)}"]
}
}
resource "azurerm_virtual_machine" "vm" {
name = "vm${count.index}"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
availability_set_id = "${azurerm_availability_set.avset.id}"
vm_size = "${var.vm_size}"
network_interface_ids = ["${element(azurerm_network_interface.nic.*.id, count.index)}"]
count = 2
storage_image_reference {
publisher = "${var.image_publisher}"
offer = "${var.image_offer}"
sku = "${var.image_sku}"
version = "${var.image_version}"
}
storage_os_disk {
name = "osdisk${count.index}"
create_option = "FromImage"
}
os_profile {
computer_name = "${var.hostname}"
admin_username = "${var.admin_username}"
admin_password = "${var.admin_password}"
}
}

View File

@ -1,11 +0,0 @@
output "hostname" {
value = "${var.hostname}"
}
output "vm_fqdn" {
value = "${azurerm_public_ip.lbpip.fqdn}"
}
output "ssh_command" {
value = "ssh ${var.admin_username}@${azurerm_public_ip.lbpip.fqdn}"
}

View File

@ -1,79 +0,0 @@
variable "resource_group" {
description = "The name of the resource group in which to create the virtual network."
}
variable "rg_prefix" {
description = "The shortened abbreviation to represent your resource group that will go on the front of some resources."
default = "rg"
}
variable "hostname" {
description = "VM name referenced also in storage-related names."
}
variable "dns_name" {
description = " Label for the Domain Name. Will be used to make up the FQDN. If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS system."
}
variable "lb_ip_dns_name" {
description = "DNS for Load Balancer IP"
}
variable "location" {
description = "The location/region where the virtual network is created. Changing this forces a new resource to be created."
default = "southcentralus"
}
variable "virtual_network_name" {
description = "The name for the virtual network."
default = "vnet"
}
variable "address_space" {
description = "The address space that is used by the virtual network. You can supply more than one address space. Changing this forces a new resource to be created."
default = "10.0.0.0/16"
}
variable "subnet_prefix" {
description = "The address prefix to use for the subnet."
default = "10.0.10.0/24"
}
variable "storage_account_type" {
description = "Defines the type of storage account to be created. Valid options are Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS. Changing this is sometimes valid - see the Azure documentation for more information on which types of accounts can be converted into other types."
default = "Standard_LRS"
}
variable "vm_size" {
description = "Specifies the size of the virtual machine."
default = "Standard_D1"
}
variable "image_publisher" {
description = "name of the publisher of the image (az vm image list)"
default = "MicrosoftWindowsServer"
}
variable "image_offer" {
description = "the name of the offer (az vm image list)"
default = "WindowsServer"
}
variable "image_sku" {
description = "image sku to apply (az vm image list)"
default = "2012-R2-Datacenter"
}
variable "image_version" {
description = "version of the image to apply (az vm image list)"
default = "latest"
}
variable "admin_username" {
description = "administrator user name"
default = "vmadmin"
}
variable "admin_password" {
description = "administrator password (recommended to disable password auth)"
}

View File

@ -1,30 +0,0 @@
# Create a CDN Profile, a CDN Endpoint with a Storage Account as origin
This Terraform template was based on [this](https://github.com/Azure/azure-quickstart-templates/tree/master/201-cdn-with-storage-account) Azure Quickstart Template. Changes to the ARM template that may have occurred since the creation of this example may not be reflected in this Terraform template.
This template creates a [CDN Profile](https://docs.microsoft.com/en-us/azure/cdn/cdn-overview) and a CDN Endpoint with the origin as a Storage Account. Note that the user needs to create a public container in the Storage Account in order for the CDN Endpoint to serve content from it.
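As a sketch, such a public container could be created with the Azure CLI 2.0 (the container name `content` and the storage account name are placeholders):

```sh
# Create a container whose blobs are publicly readable,
# so the CDN endpoint can serve content from it
az storage container create \
  --name content \
  --public-access blob \
  --account-name REPLACE-WITH-YOUR-STORAGE-ACCOUNT
```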
# Important
The endpoint will not immediately be available for use, as it takes time for the registration to propagate through the CDN. For Azure CDN from Akamai profiles, propagation will usually complete within one minute. For Azure CDN from Verizon profiles, propagation will usually complete within 90 minutes, but in some cases can take longer.
Users who try to use the CDN domain name before the endpoint configuration has propagated to the POPs will receive HTTP 404 response codes. If it has been several hours since you created your endpoint and you're still receiving 404 responses, please see [Troubleshooting CDN endpoints returning 404 statuses](https://docs.microsoft.com/en-us/azure/cdn/cdn-troubleshoot-endpoint).
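One quick way to check whether propagation has completed is to request a known object through the endpoint (a sketch; the endpoint name and object path are placeholders):

```sh
# A 404 here may simply mean the endpoint has not finished propagating
curl -I https://REPLACE-WITH-YOUR-ENDPOINT.azureedge.net/content/example.txt
```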
## main.tf
The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
## outputs.tf
This data is output when `terraform apply` is called, and can be queried using the `terraform output` command.
## provider.tf
Azure requires that an application be added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this application and populate your `provider.tf` file.
## terraform.tfvars
If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
If you are committing this template to source control, please ensure that you add this file to your `.gitignore` file.
## variables.tf
The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
![graph](/examples/azure-cdn-with-storage-account/graph.png)

View File

@ -1,29 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
docker run --rm -it \
-e ARM_CLIENT_ID \
-e ARM_CLIENT_SECRET \
-e ARM_SUBSCRIPTION_ID \
-e ARM_TENANT_ID \
-v $(pwd):/data \
--workdir=/data \
--entrypoint "/bin/sh" \
hashicorp/terraform:light \
-c "/bin/terraform get; \
/bin/terraform validate; \
/bin/terraform plan -out=out.tfplan -var resource_group=$KEY; \
/bin/terraform apply out.tfplan"
# cleanup deployed azure resources via terraform
docker run --rm -it \
-e ARM_CLIENT_ID \
-e ARM_CLIENT_SECRET \
-e ARM_SUBSCRIPTION_ID \
-e ARM_TENANT_ID \
-v $(pwd):/data \
--workdir=/data \
--entrypoint "/bin/sh" \
hashicorp/terraform:light \
-c "/bin/terraform destroy -force -var resource_group=$KEY;"

View File

@ -1,15 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
if docker -v; then
# generate a unique string for CI deployment
export KEY=$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'a-z' | head -c 12)
export PASSWORD=$KEY$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'A-Z' | head -c 2)$(cat /dev/urandom | env LC_CTYPE=C tr -cd '0-9' | head -c 2)
/bin/sh ./deploy.ci.sh
else
echo "Docker is used to run terraform commands, please install before run: https://docs.docker.com/docker-for-mac/install/"
fi

Binary file not shown (before: 68 KiB).

View File

@ -1,39 +0,0 @@
# provider "azurerm" {
# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID"
# client_id = "REPLACE-WITH-YOUR-CLIENT-ID"
# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET"
# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID"
# }
resource "azurerm_resource_group" "rg" {
name = "${var.resource_group}"
location = "${var.location}"
}
resource "azurerm_storage_account" "stor" {
name = "${var.resource_group}stor"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
account_type = "${var.storage_account_type}"
}
resource "azurerm_cdn_profile" "cdn" {
name = "${var.resource_group}CdnProfile1"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
sku = "Standard_Akamai"
}
resource "azurerm_cdn_endpoint" "cdnendpt" {
name = "${var.resource_group}CdnEndpoint1"
profile_name = "${azurerm_cdn_profile.cdn.name}"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
origin {
name = "${var.resource_group}Origin1"
host_name = "${var.host_name}"
http_port = 80
https_port = 443
}
}

View File

@ -1,3 +0,0 @@
output "CDN Endpoint ID" {
value = "${azurerm_cdn_endpoint.cdnendpt.name}.azureedge.net"
}

View File

@ -1,18 +0,0 @@
variable "resource_group" {
description = "The name of the resource group in which to create the virtual network."
}
variable "location" {
description = "The location/region where the virtual network is created. Changing this forces a new resource to be created."
default = "southcentralus"
}
variable "storage_account_type" {
description = "Specifies the type of the storage account"
default = "Standard_LRS"
}
variable "host_name" {
description = "A string that determines the hostname/IP address of the origin server. This string could be a domain name, IPv4 address or IPv6 address."
default = "www.hostnameoforiginserver.com"
}

View File

@ -1,44 +0,0 @@
# Enable encryption on a running Linux VM
This Terraform template was based on [this](https://github.com/Azure/azure-quickstart-templates/tree/master/201-encrypt-running-linux-vm) Azure Quickstart Template. Changes to the ARM template that may have occurred since the creation of this example may not be reflected in this Terraform template.
This template enables encryption on a running Linux VM using an AAD client secret. It assumes that the VM is located in the same region as the resource group; if not, please edit the template to pass an appropriate location for the VM sub-resources.
## Prerequisites:
Azure Disk Encryption securely stores the encryption secrets in a specified Azure Key Vault.
Create the Key Vault and assign appropriate access policies. You may use this script to ensure that your vault is properly configured: [AzureDiskEncryptionPreRequisiteSetup.ps1](https://github.com/Azure/azure-powershell/blob/10fc37e9141af3fde6f6f79b9d46339b73cf847d/src/ResourceManager/Compute/Commands.Compute/Extension/AzureDiskEncryption/Scripts/AzureDiskEncryptionPreRequisiteSetup.ps1)
Use the PowerShell cmdlet below to get the `key_vault_secret_url` and `key_vault_resource_id`.
```
Get-AzureRmKeyVault -VaultName $KeyVaultName -ResourceGroupName $rgname
```
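If you prefer the Azure CLI 2.0, the same values can be looked up roughly as follows (a sketch assuming the vault and secret already exist; all names are placeholders):

```sh
# key_vault_resource_id: the full resource ID of the vault
az keyvault show --name REPLACE-WITH-VAULT-NAME \
  --resource-group REPLACE-WITH-RESOURCE-GROUP --query id

# key_vault_secret_url: a secret's ID is its URL
az keyvault secret show --vault-name REPLACE-WITH-VAULT-NAME \
  --name REPLACE-WITH-SECRET-NAME --query id
```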
References:
- [White paper](https://azure.microsoft.com/en-us/documentation/articles/azure-security-disk-encryption/)
- [Explore Azure Disk Encryption with Azure Powershell](https://blogs.msdn.microsoft.com/azuresecurity/2015/11/16/explore-azure-disk-encryption-with-azure-powershell/)
- [Explore Azure Disk Encryption with Azure PowerShell Part 2](http://blogs.msdn.com/b/azuresecurity/archive/2015/11/21/explore-azure-disk-encryption-with-azure-powershell-part-2.aspx)
## main.tf
The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
## outputs.tf
This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command.
## provider.tf
You may leave the provider block in the `main.tf`, as it is in this template, or you can create a file called `provider.tf` and add it to your `.gitignore` file.
Azure requires that an application be added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this application and populate your `provider.tf` file.
## terraform.tfvars
If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
If you are committing this template to source control, please ensure that you add this file to your `.gitignore` file.
## variables.tf
The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
![graph](/examples/azure-encrypt-running-linux-vm/graph.png)

View File

@ -1,60 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
docker run --rm -it \
-e ARM_CLIENT_ID \
-e ARM_CLIENT_SECRET \
-e ARM_SUBSCRIPTION_ID \
-e ARM_TENANT_ID \
-e KEY_ENCRYPTION_KEY_URL \
-e KEY_VAULT_RESOURCE_ID \
-v $(pwd):/data \
--workdir=/data \
--entrypoint "/bin/sh" \
hashicorp/terraform:light \
-c "/bin/terraform get; \
/bin/terraform validate; \
/bin/terraform plan -out=out.tfplan \
-var resource_group=$KEY \
-var hostname=$KEY \
-var admin_username=$KEY \
-var admin_password=$PASSWORD \
-var passphrase=$PASSWORD \
-var key_vault_name=$KEY_VAULT_NAME \
-var aad_client_id=$ARM_CLIENT_ID \
-var aad_client_secret=$ARM_CLIENT_SECRET \
-var key_encryption_key_url=$KEY_ENCRYPTION_KEY_URL \
-var key_vault_resource_id=$KEY_VAULT_RESOURCE_ID; \
/bin/terraform apply out.tfplan"
# cleanup deployed azure resources via azure-cli
docker run --rm -it \
azuresdk/azure-cli-python:0.2.10 \
sh -c "az login --service-principal -u $ARM_CLIENT_ID -p $ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID > /dev/null; \
az vm show -g $KEY -n $KEY; \
az vm encryption show -g $KEY -n $KEY"
# cleanup deployed azure resources via terraform
docker run --rm -it \
-e ARM_CLIENT_ID \
-e ARM_CLIENT_SECRET \
-e ARM_SUBSCRIPTION_ID \
-e ARM_TENANT_ID \
-e KEY_ENCRYPTION_KEY_URL \
-e KEY_VAULT_RESOURCE_ID \
-v $(pwd):/data \
--workdir=/data \
--entrypoint "/bin/sh" \
hashicorp/terraform:light \
-c "/bin/terraform destroy -force \
-var resource_group=$KEY \
-var hostname=$KEY \
-var admin_username=$KEY \
-var admin_password=$PASSWORD \
-var passphrase=$PASSWORD \
-var key_vault_name=$KEY_VAULT_NAME \
-var aad_client_id=$ARM_CLIENT_ID \
-var aad_client_secret=$ARM_CLIENT_SECRET \
-var key_encryption_key_url=$KEY_ENCRYPTION_KEY_URL \
-var key_vault_resource_id=$KEY_VAULT_RESOURCE_ID;"

View File

@ -1,17 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
if docker -v; then
# generate a unique string for CI deployment
export KEY=$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'a-z' | head -c 12)
export PASSWORD=$KEY$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'A-Z' | head -c 2)$(cat /dev/urandom | env LC_CTYPE=C tr -cd '0-9' | head -c 2)
export EXISTING_RESOURCE_GROUP=permanent
export KEY_VAULT_NAME=permanentkeyvault
/bin/sh ./deploy.ci.sh
else
echo "Docker is used to run terraform commands, please install before run: https://docs.docker.com/docker-for-mac/install/"
fi

Binary file not shown (before: 405 KiB).

View File

@ -1,223 +0,0 @@
# provider "azurerm" {
# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID"
# client_id = "REPLACE-WITH-YOUR-CLIENT-ID"
# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET"
# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID"
# }
resource "azurerm_resource_group" "rg" {
name = "${var.resource_group}"
location = "${var.location}"
}
resource "azurerm_virtual_network" "vnet" {
name = "${var.hostname}vnet"
location = "${var.location}"
address_space = ["${var.address_space}"]
resource_group_name = "${azurerm_resource_group.rg.name}"
}
resource "azurerm_subnet" "subnet" {
name = "${var.hostname}subnet"
virtual_network_name = "${azurerm_virtual_network.vnet.name}"
resource_group_name = "${azurerm_resource_group.rg.name}"
address_prefix = "${var.subnet_prefix}"
}
resource "azurerm_network_interface" "nic" {
name = "nic"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
ip_configuration {
name = "ipconfig"
subnet_id = "${azurerm_subnet.subnet.id}"
private_ip_address_allocation = "Dynamic"
}
}
resource "azurerm_storage_account" "stor" {
name = "${var.hostname}stor"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
account_type = "${var.storage_account_type}"
}
resource "azurerm_virtual_machine" "vm" {
name = "${var.hostname}"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
vm_size = "${var.vm_size}"
network_interface_ids = ["${azurerm_network_interface.nic.id}"]
storage_image_reference {
publisher = "${var.image_publisher}"
offer = "${var.image_offer}"
sku = "${var.image_sku}"
version = "${var.image_version}"
}
storage_os_disk {
name = "${var.hostname}osdisk"
create_option = "FromImage"
disk_size_gb = "30"
}
os_profile {
computer_name = "${var.hostname}"
admin_username = "${var.admin_username}"
admin_password = "${var.admin_password}"
}
os_profile_linux_config {
disable_password_authentication = false
}
}
resource "azurerm_template_deployment" "linux_vm" {
name = "encrypt"
resource_group_name = "${azurerm_resource_group.rg.name}"
deployment_mode = "Incremental"
depends_on = ["azurerm_virtual_machine.vm"]
template_body = <<DEPLOY
{
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"aadClientID": {
"defaultValue": "${var.aad_client_id}",
"type": "string"
},
"aadClientSecret": {
"defaultValue": "${var.aad_client_secret}",
"type": "string"
},
"diskFormatQuery": {
"defaultValue": "",
"type": "string"
},
"encryptionOperation": {
"allowedValues": [ "EnableEncryption", "EnableEncryptionFormat" ],
"defaultValue": "${var.encryption_operation}",
"type": "string"
},
"volumeType": {
"allowedValues": [ "OS", "Data", "All" ],
"defaultValue": "${var.volume_type}",
"type": "string"
},
"keyEncryptionKeyURL": {
"defaultValue": "${var.key_encryption_key_url}",
"type": "string"
},
"keyVaultName": {
"defaultValue": "${var.key_vault_name}",
"type": "string"
},
"keyVaultResourceGroup": {
"defaultValue": "${azurerm_resource_group.rg.name}",
"type": "string"
},
"passphrase": {
"defaultValue": "${var.passphrase}",
"type": "string"
},
"sequenceVersion": {
"defaultValue": "${var.sequence_version}",
"type": "string"
},
"useKek": {
"allowedValues": [
"nokek",
"kek"
],
"defaultValue": "${var.use_kek}",
"type": "string"
},
"vmName": {
"defaultValue": "${azurerm_virtual_machine.vm.name}",
"type": "string"
},
"_artifactsLocation": {
"type": "string",
"defaultValue": "${var.artifacts_location}"
},
"_artifactsLocationSasToken": {
"type": "string",
"defaultValue": "${var.artifacts_location_sas_token}"
}
},
"variables": {
"extensionName": "${var.extension_name}",
"extensionVersion": "0.1",
"keyEncryptionAlgorithm": "RSA-OAEP",
"keyVaultURL": "https://${var.key_vault_name}.vault.azure.net/",
"keyVaultResourceID": "${var.key_vault_resource_id}",
"updateVmUrl": "${var.artifacts_location}/201-encrypt-running-linux-vm/updatevm-${var.use_kek}.json${var.artifacts_location_sas_token}"
},
"resources": [
{
"type": "Microsoft.Compute/virtualMachines/extensions",
"name": "[concat(parameters('vmName'),'/', variables('extensionName'))]",
"apiVersion": "2015-06-15",
"location": "[resourceGroup().location]",
"properties": {
"protectedSettings": {
"AADClientSecret": "[parameters('aadClientSecret')]",
"Passphrase": "[parameters('passphrase')]"
},
"publisher": "Microsoft.Azure.Security",
"settings": {
"AADClientID": "[parameters('aadClientID')]",
"DiskFormatQuery": "[parameters('diskFormatQuery')]",
"EncryptionOperation": "[parameters('encryptionOperation')]",
"KeyEncryptionAlgorithm": "[variables('keyEncryptionAlgorithm')]",
"KeyEncryptionKeyURL": "[parameters('keyEncryptionKeyURL')]",
"KeyVaultURL": "[variables('keyVaultURL')]",
"SequenceVersion": "[parameters('sequenceVersion')]",
"VolumeType": "[parameters('volumeType')]"
},
"type": "AzureDiskEncryptionForLinux",
"typeHandlerVersion": "[variables('extensionVersion')]"
}
},
{
"apiVersion": "2015-01-01",
"dependsOn": [
"[resourceId('Microsoft.Compute/virtualMachines/extensions', parameters('vmName'), variables('extensionName'))]"
],
"name": "[concat(parameters('vmName'), 'updateVm')]",
"type": "Microsoft.Resources/deployments",
"properties": {
"mode": "Incremental",
"parameters": {
"keyEncryptionKeyURL": {
"value": "[parameters('keyEncryptionKeyURL')]"
},
"keyVaultResourceID": {
"value": "[variables('keyVaultResourceID')]"
},
"keyVaultSecretUrl": {
"value": "[reference(resourceId('Microsoft.Compute/virtualMachines/extensions', parameters('vmName'), variables('extensionName'))).instanceView.statuses[0].message]"
},
"vmName": {
"value": "[parameters('vmName')]"
}
},
"templateLink": {
"contentVersion": "1.0.0.0",
"uri": "[variables('updateVmUrl')]"
}
}
}
],
"outputs": {
"BitLockerKey": {
"type": "string",
"value": "[reference(resourceId('Microsoft.Compute/virtualMachines/extensions', parameters('vmName'), variables('extensionName'))).instanceView.statuses[0].message]"
}
}
}
DEPLOY
}

View File

@ -1,8 +0,0 @@
output "hostname" {
value = "${var.hostname}"
}
output "BitLockerKey" {
value = "${azurerm_template_deployment.linux_vm.outputs["BitLockerKey"]}"
sensitive = true
}

View File

@ -1,125 +0,0 @@
variable "resource_group" {
description = "Resource group name into which your new virtual machine will go."
}
variable "location" {
description = "The location/region where the virtual network is created. Changing this forces a new resource to be created."
default = "southcentralus"
}
variable "hostname" {
description = "Used to form various names including the key vault, vm, and storage. Must be unique."
}
variable "address_space" {
description = "The address space that is used by the virtual network. You can supply more than one address space. Changing this forces a new resource to be created."
default = "10.0.0.0/24"
}
variable "subnet_prefix" {
description = "The address prefix to use for the subnet."
default = "10.0.0.0/24"
}
variable "storage_account_type" {
description = "Defines the type of storage account to be created. Valid options are Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS. Changing this is sometimes valid - see the Azure documentation for more information on which types of accounts can be converted into other types."
default = "Standard_LRS"
}
variable "vm_size" {
description = "Specifies the size of the virtual machine. This must be the same as the vm image from which you are copying."
default = "Standard_A0"
}
variable "image_publisher" {
description = "name of the publisher of the image (az vm image list)"
default = "Canonical"
}
variable "image_offer" {
description = "the name of the offer (az vm image list)"
default = "UbuntuServer"
}
variable "image_sku" {
description = "image sku to apply (az vm image list)"
default = "16.04-LTS"
}
variable "image_version" {
description = "version of the image to apply (az vm image list)"
default = "latest"
}
variable "admin_username" {
description = "administrator user name for the vm"
default = "vmadmin"
}
variable "admin_password" {
description = "administrator password for the vm (recommended to disable password auth)"
}
variable "aad_client_id" {
description = "Client ID of AAD app which has permissions to KeyVault"
}
variable "aad_client_secret" {
description = "Client Secret of AAD app which has permissions to KeyVault"
}
variable "disk_format_query" {
description = "The query string used to identify the disks to format and encrypt. This parameter only works when you set the EncryptionOperation as EnableEncryptionFormat. For example, passing [{\"dev_path\":\"/dev/md0\",\"name\":\"encryptedraid\",\"file_system\":\"ext4\"}] will format /dev/md0, encrypt it and mount it at /mnt/dataraid. This parameter should only be used for RAID devices. The specified device must not have any existing filesystem on it."
default = ""
}
variable "encryption_operation" {
description = "EnableEncryption would encrypt the disks in place and EnableEncryptionFormat would format the disks directly"
default = "EnableEncryption"
}
variable "volume_type" {
description = "Defines which drives should be encrypted. OS encryption is supported on RHEL 7.2, CentOS 7.2 & Ubuntu 16.04. Allowed values: OS, Data, All"
default = "All"
}
variable "key_encryption_key_url" {
description = "URL of the KeyEncryptionKey used to encrypt the volume encryption key"
}
variable "key_vault_resource_id" {
description = "uri of Azure key vault resource"
}
variable "key_vault_name" {
description = "name of Azure key vault resource"
}
variable "passphrase" {
description = "The passphrase for the disks"
}
variable "extension_name" {
description = "the name of the vm extension"
default = "AzureDiskEncryptionForLinux"
}
variable "sequence_version" {
description = "sequence version of the bitlocker operation. Increment this everytime an operation is performed on the same VM"
default = 1
}
variable "use_kek" {
description = "Select kek if the secret should be encrypted with a key encryption key. Allowed values: kek, nokek"
default = "kek"
}
variable "artifacts_location" {
description = "The base URI where artifacts required by this template are located. When the template is deployed using the accompanying scripts, a private location in the subscription will be used and this value will be automatically generated."
default = "https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master"
}
variable "artifacts_location_sas_token" {
description = "The sasToken required to access _artifactsLocation. When the template is deployed using the accompanying scripts, a sasToken will be automatically generated."
default = ""
}

View File

@ -1,114 +0,0 @@
# OpenShift Origin Deployment Template
This Terraform template was based on [this](https://github.com/Microsoft/openshift-origin) Azure Quickstart Template. Changes to the ARM template that may have occurred since the creation of this example may not be reflected here.
## OpenShift Origin with Username / Password
The current template deploys OpenShift Origin 1.5 RC0.
This template deploys OpenShift Origin with basic username / password for authentication to OpenShift. You can select to use either CentOS or RHEL for the OS. It includes the following resources:
|Resource |Properties |
|-------------------|------------------------------------------------------------------------------------------------------------------------------------|
|Virtual Network |**Address prefix:** 10.0.0.0/16<br />**Master subnet:** 10.0.0.0/24<br />**Node subnet:** 10.0.1.0/24 |
|Load Balancer |2 probes and two rules for TCP 80 and TCP 443 |
|Public IP Addresses|OpenShift Master public IP<br />OpenShift Router public IP attached to Load Balancer |
|Storage Accounts |2 Storage Accounts |
|Virtual Machines |Single master<br />User-defined number of nodes<br />All VMs include a single attached data disk for Docker thin pool logical volume|
If you have a Red Hat subscription and would like to deploy an OpenShift Container Platform (formerly OpenShift Enterprise) cluster, please visit: https://github.com/Microsoft/openshift-container-platform
### Generate SSH Keys
You'll need to generate an SSH key pair in order to provision this template. Ensure that you do not include a passphrase with the private key. <br/>
If you are using a Windows computer, you can download `puttygen.exe`. You will need to export to OpenSSH (from the Conversions menu) to get a valid private key for use in the template.<br/>
From a Linux or Mac, you can just use the `ssh-keygen` command. Once you are finished deploying the cluster, you can always generate a new key pair that uses a passphrase and replace the original one used during initial deployment.
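A minimal sketch of generating such a key pair without a passphrase (the output file name is only an example):

```sh
# Generate an RSA key pair with an empty passphrase, as required above
ssh-keygen -t rsa -b 4096 -N "" -f ~/.ssh/openshift_rsa
```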
### Create Key Vault to store SSH Private Key
You will need to create a Key Vault to store your SSH Private Key that will then be used as part of the deployment.
1. **Create Key Vault using Powershell**<br/>
a. Create new resource group: New-AzureRMResourceGroup -Name 'ResourceGroupName' -Location 'West US'<br/>
b. Create key vault: New-AzureRmKeyVault -VaultName 'KeyVaultName' -ResourceGroup 'ResourceGroupName' -Location 'West US'<br/>
c. Create variable with sshPrivateKey: $securesecret = ConvertTo-SecureString -String '[copy ssh Private Key here - including line feeds]' -AsPlainText -Force<br/>
d. Create Secret: Set-AzureKeyVaultSecret -Name 'SecretName' -SecretValue $securesecret -VaultName 'KeyVaultName'<br/>
e. Enable the Key Vault for Template Deployments: Set-AzureRmKeyVaultAccessPolicy -VaultName 'KeyVaultName' -ResourceGroupName 'ResourceGroupName' -EnabledForTemplateDeployment
2. **Create Key Vault using Azure CLI 1.0**<br/>
a. Create new Resource Group: azure group create \<name\> \<location\><br/>
Ex: `azure group create ResourceGroupName 'East US'`<br/>
b. Create Key Vault: azure keyvault create -u \<vault-name\> -g \<resource-group\> -l \<location\><br/>
Ex: `azure keyvault create -u KeyVaultName -g ResourceGroupName -l 'East US'`<br/>
c. Create Secret: azure keyvault secret set -u \<vault-name\> -s \<secret-name\> --file \<private-key-file-name\><br/>
Ex: `azure keyvault secret set -u KeyVaultName -s SecretName --file ~/.ssh/id_rsa`<br/>
d. Enable the Key Vault for Template Deployment: azure keyvault set-policy -u \<vault-name\> --enabled-for-template-deployment true<br/>
Ex: `azure keyvault set-policy -u KeyVaultName --enabled-for-template-deployment true`<br/>
3. **Create Key Vault using Azure CLI 2.0**<br/>
a. Create new Resource Group: az group create -n \<name\> -l \<location\><br/>
Ex: `az group create -n ResourceGroupName -l 'East US'`<br/>
b. Create Key Vault: az keyvault create -n \<vault-name\> -g \<resource-group\> -l \<location\> --enabled-for-template-deployment true<br/>
Ex: `az keyvault create -n KeyVaultName -g ResourceGroupName -l 'East US' --enabled-for-template-deployment true`<br/>
c. Create Secret: az keyvault secret set --vault-name \<vault-name\> -n \<secret-name\> --file \<private-key-file-name\><br/>
Ex: `az keyvault secret set --vault-name KeyVaultName -n SecretName --file ~/.ssh/id_rsa`<br/>
4. **Clone the OpenShift repository [here](https://github.com/Microsoft/openshift-origin)**<br/>
a. Note the local script path; this will be needed for remote-execs on the remote machines.<br/>
## Deploy Template
Once you have collected all of the prerequisites for the template, you can deploy it via Terraform.
Monitor the deployment via Terraform and get the console URL from the outputs of a successful deployment, which will look something like this (if using the sample parameters file and the "West US 2" location):
`https://me-master1.westus2.cloudapp.azure.com:8443/console`
The cluster will use self-signed certificates. Accept the warning and proceed to the login page.
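A minimal sketch of that flow from this example's directory (only two of the required variables are shown; see `variables.tf` for the full list):

```sh
# Plan and apply the template; supply the remaining -var flags as needed
terraform plan -out=out.tfplan \
  -var resource_group_name=REPLACE-WITH-RESOURCE-GROUP \
  -var openshift_password=REPLACE-WITH-PASSWORD
terraform apply out.tfplan
```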
### NOTE
Ensure that the combination of the openshiftMasterPublicIpDnsLabelPrefix and nodeLbPublicIpDnsLabelPrefix parameters, together with the deployment location, gives you a globally unique URL for the cluster; otherwise the deployment will fail at the step of allocating public IPs with fully qualified domain names as above.
### NOTE
This template deploys a bastion host solely for the connection provisioner, allowing remote-exec to run commands on machines without public IPs; note the specific dependencies on the order in which VMs are created for this to work properly.
### NOTE
The OpenShift Ansible playbook does take a while to run when using VMs backed by Standard Storage. VMs backed by Premium Storage are faster. If you want Premium Storage, select a DS or GS series VM.
<hr />
Be sure to follow the OpenShift instructions to create the necessary DNS entry for the OpenShift Router for access to applications.
## Post-Deployment Operations
This template creates an OpenShift user but does not make it a cluster administrator. To do that, perform the following.
1. SSH in to master node
2. Execute the following command:
```sh
sudo oadm policy add-cluster-role-to-user cluster-admin <user>
```
### Additional OpenShift Configuration Options
You can configure additional settings per the official [OpenShift Origin Documentation](https://docs.openshift.org/latest/welcome/index.html).
A few options you have:
1. Deployment output<br/>
a. openshiftConsoleUrl: the OpenShift console URL<br/>
b. openshiftMasterSsh: the SSH command for the master node<br/>
c. openshiftNodeLoadBalancerFQDN: the node load balancer FQDN<br/>
To get the deployment output data:<br/>
a. On portal.azure.com, choose 'Resource groups', select your group, then select 'Deployments' and the deployment 'Microsoft.Template'. The deployment output contains the OpenShift console URL, the SSH command, and the load balancer URL.<br/>
b. With the Azure CLI: azure group deployment list &lt;resource group name>
2. Add additional users. You can find much detail about this in the openshift.org documentation under 'Cluster Administration' and 'Managing Users'. This installation uses htpasswd as the identity provider. To add more users, SSH in to the master node and execute the following command:
```sh
sudo htpasswd /etc/origin/master/htpasswd user1
```
Now this user can log in with the `oc` CLI tool or the OpenShift console URL.

View File

@ -1,46 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
docker run --rm -it \
-e ARM_CLIENT_ID \
-e ARM_CLIENT_SECRET \
-e ARM_SUBSCRIPTION_ID \
-e ARM_TENANT_ID \
-e AAD_CLIENT_ID \
-e AAD_CLIENT_SECRET \
-e KEY_ENCRYPTION_KEY_URL \
-e SSH_PUBLIC_KEY \
-v /:/data \
--workdir=/data/$(pwd) \
--entrypoint "/bin/sh" \
hashicorp/terraform:light \
-c "/bin/terraform get; \
/bin/terraform validate; \
/bin/terraform plan -out=out.tfplan \
-var subscription_id=$ARM_SUBSCRIPTION_ID \
-var tenant_id=$ARM_TENANT_ID \
-var aad_client_id=$ARM_CLIENT_ID \
-var aad_client_secret=$ARM_CLIENT_SECRET \
-var resource_group_name=$KEY \
-var key_vault_name=$KEY_VAULT_NAME \
-var key_vault_resource_group=$KEY_VAULT_RESOURCE_GROUP \
-var key_vault_secret=$KEY_VAULT_SECRET \
-var openshift_cluster_prefix=$KEY \
-var openshift_password=$PASSWORD \
-var openshift_script_path=$LOCAL_SCRIPT_PATH \
-var ssh_public_key=\"$OS_PUBLIC_KEY\" \
-var connection_private_ssh_key_path=$CONTAINER_PRIVATE_KEY_PATH \
-var master_instance_count=$MASTER_COUNT \
-var infra_instance_count=$INFRA_COUNT \
-var node_instance_count=$NODE_COUNT; \
/bin/terraform apply out.tfplan;"
# cleanup deployed azure resources via azure-cli
# docker run --rm -it \
# azuresdk/azure-cli-python \
# sh -c "az login --service-principal -u $ARM_CLIENT_ID -p $ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID > /dev/null; \
# az vm show -g $KEY -n $KEY; \
# az vm encryption show -g $KEY -n $KEY"
# cleanup deployed azure resources via terraform

View File

@ -1,24 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
if docker -v; then
# generate a unique string for CI deployment
export KEY=$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'a-z' | head -c 12)
export PASSWORD="P4ssw0rd1"
export KEY_VAULT_RESOURCE_GROUP=permanent
export KEY_VAULT_NAME=TerraformVault
export KEY_VAULT_SECRET=OpenShiftSSH
export OS_PUBLIC_KEY='ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCvdOGspeeBhsOZw6OK2WmP3bBUOeZj1yaz6Dw+lzsRmjwNSmJIoGZPzlbdy1lzlkXIm2JaT4h/cUi39w+Q2RZRjxmr7TbLyuidJfFLvRJ35RDullUYLWEPx3csBroPkCv+0qgmTW/MqqjqS4yhlJ01uc9RNx9Jt3XZN7LNr8SUoBzdLCWJa1rpCTtUckO1Jyzi4VwZ2ek+nYPJuJ8hG0KeHnyXDXV4hQZTFtGvtbmgoyoybppFQMbM3a31KZeaWXUeZkZczBsdNRkX8XCDjb6zUmUMQUzZpalFlL1O+rZD0kaXKr0uZWiYOKu2LjnWeDW9x4tig1mf+L84vniP+lLKFW8na3Lzx11ysEpuhIJGPMMI8sjTCnu51PmiwHW2U9OR06skPUO7ZGD0QHg7jKXdz5bHT+1OqXeAStULDiPVRIPrxxpurPXiJRm7JPbPvPqrMqZJ3K7J9W6OGHG3CoDR5RfYlPWURTaVH10stb4hKevasCd+YoLStB1XgMaL/cG9bM0TIWmODV/+pfn800PgxeBn1vABpL0NF8K2POLs37vGJoh/RyGCDVd0HEKArpZj0/g+fv7tr3tFFOCY5bHSuDTZcY8sWPhxKXSismoApM3a+USF5HkDkWSTEiETs2wgUdTSt4MuN2maRXOK2JboQth1Qw+vCOvqcls0dMa0NQ== you@example.com'
export CONTAINER_PRIVATE_KEY_PATH="/data/Users/$USER/.ssh/id_rsa"
export LOCAL_SCRIPT_PATH="/data/Users/$USER/Code/10thmagnitude/openshift-origin/scripts"
export MASTER_COUNT=1
export INFRA_COUNT=1
export NODE_COUNT=1
/bin/sh ./deploy.ci.sh
else
echo "Docker is used to run terraform commands, please install before run: https://docs.docker.com/docker-for-mac/install/"
fi

View File

@ -1,826 +0,0 @@
provider "azurerm" {
subscription_id = "${var.subscription_id}"
client_id = "${var.aad_client_id}"
client_secret = "${var.aad_client_secret}"
tenant_id = "${var.tenant_id}"
}
resource "azurerm_resource_group" "rg" {
name = "${var.resource_group_name}"
location = "${var.resource_group_location}"
}
# ******* NETWORK SECURITY GROUPS ***********
resource "azurerm_network_security_group" "master_nsg" {
name = "${var.openshift_cluster_prefix}-master-nsg"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
security_rule {
name = "allow_SSH_in_all"
description = "Allow SSH in from all locations"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "*"
destination_address_prefix = "*"
}
security_rule {
name = "allow_HTTPS_all"
description = "Allow HTTPS connections from all locations"
priority = 200
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "443"
source_address_prefix = "*"
destination_address_prefix = "*"
}
security_rule {
name = "allow_OpenShift_console_in_all"
description = "Allow OpenShift Console connections from all locations"
priority = 300
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "8443"
source_address_prefix = "*"
destination_address_prefix = "*"
}
}
resource "azurerm_network_security_group" "infra_nsg" {
name = "${var.openshift_cluster_prefix}-infra-nsg"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
security_rule {
name = "allow_SSH_in_all"
description = "Allow SSH in from all locations"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "*"
destination_address_prefix = "*"
}
security_rule {
name = "allow_HTTPS_all"
description = "Allow HTTPS connections from all locations"
priority = 200
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "443"
source_address_prefix = "*"
destination_address_prefix = "*"
}
security_rule {
name = "allow_HTTP_in_all"
description = "Allow HTTP connections from all locations"
priority = 300
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "80"
source_address_prefix = "*"
destination_address_prefix = "*"
}
}
resource "azurerm_network_security_group" "node_nsg" {
name = "${var.openshift_cluster_prefix}-node-nsg"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
security_rule {
name = "allow_SSH_in_all"
description = "Allow SSH in from all locations"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "*"
destination_address_prefix = "*"
}
security_rule {
name = "allow_HTTPS_all"
description = "Allow HTTPS connections from all locations"
priority = 200
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "443"
source_address_prefix = "*"
destination_address_prefix = "*"
}
security_rule {
name = "allow_HTTP_in_all"
description = "Allow HTTP connections from all locations"
priority = 300
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "80"
source_address_prefix = "*"
destination_address_prefix = "*"
}
}
# ******* STORAGE ACCOUNTS ***********
resource "azurerm_storage_account" "bastion_storage_account" {
name = "${var.openshift_cluster_prefix}bsa"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
account_type = "${var.storage_account_type_map["${var.bastion_vm_size}"]}"
}
resource "azurerm_storage_account" "master_storage_account" {
name = "${var.openshift_cluster_prefix}msa"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
account_type = "${var.storage_account_type_map["${var.master_vm_size}"]}"
}
resource "azurerm_storage_account" "infra_storage_account" {
name = "${var.openshift_cluster_prefix}infrasa"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
account_type = "${var.storage_account_type_map["${var.infra_vm_size}"]}"
}
resource "azurerm_storage_account" "nodeos_storage_account" {
name = "${var.openshift_cluster_prefix}nodeossa"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
account_type = "${var.storage_account_type_map["${var.node_vm_size}"]}"
}
resource "azurerm_storage_account" "nodedata_storage_account" {
name = "${var.openshift_cluster_prefix}nodedatasa"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
account_type = "${var.storage_account_type_map["${var.node_vm_size}"]}"
}
resource "azurerm_storage_account" "registry_storage_account" {
name = "${var.openshift_cluster_prefix}regsa"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
account_type = "Standard_LRS"
}
resource "azurerm_storage_account" "persistent_volume_storage_account" {
name = "${var.openshift_cluster_prefix}pvsa"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
account_type = "Standard_LRS"
}
# ******* AVAILABILITY SETS ***********
resource "azurerm_availability_set" "master" {
name = "masteravailabilityset"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
}
resource "azurerm_availability_set" "infra" {
name = "infraavailabilityset"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
}
resource "azurerm_availability_set" "node" {
name = "nodeavailabilityset"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
}
# ******* IP ADDRESSES ***********
resource "azurerm_public_ip" "bastion_pip" {
name = "bastionpip"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
public_ip_address_allocation = "Static"
domain_name_label = "${var.openshift_cluster_prefix}-bastion"
}
resource "azurerm_public_ip" "openshift_master_pip" {
name = "masterpip"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
public_ip_address_allocation = "Static"
domain_name_label = "${var.openshift_cluster_prefix}"
}
resource "azurerm_public_ip" "infra_lb_pip" {
name = "infraip"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
public_ip_address_allocation = "Static"
domain_name_label = "${var.openshift_cluster_prefix}infrapip"
}
# ******* VNETS / SUBNETS ***********
resource "azurerm_virtual_network" "vnet" {
name = "openshiftvnet"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
address_space = ["10.0.0.0/8"]
depends_on = ["azurerm_virtual_network.vnet"]
}
resource "azurerm_subnet" "master_subnet" {
name = "mastersubnet"
virtual_network_name = "${azurerm_virtual_network.vnet.name}"
resource_group_name = "${azurerm_resource_group.rg.name}"
address_prefix = "10.1.0.0/16"
depends_on = ["azurerm_virtual_network.vnet"]
}
resource "azurerm_subnet" "node_subnet" {
name = "nodesubnet"
virtual_network_name = "${azurerm_virtual_network.vnet.name}"
resource_group_name = "${azurerm_resource_group.rg.name}"
address_prefix = "10.2.0.0/16"
}
# ******* MASTER LOAD BALANCER ***********
resource "azurerm_lb" "master_lb" {
name = "masterloadbalancer"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
depends_on = ["azurerm_public_ip.openshift_master_pip"]
frontend_ip_configuration {
name = "LoadBalancerFrontEnd"
public_ip_address_id = "${azurerm_public_ip.openshift_master_pip.id}"
}
}
resource "azurerm_lb_backend_address_pool" "master_lb" {
resource_group_name = "${azurerm_resource_group.rg.name}"
name = "loadBalancerBackEnd"
loadbalancer_id = "${azurerm_lb.master_lb.id}"
depends_on = ["azurerm_lb.master_lb"]
}
resource "azurerm_lb_probe" "master_lb" {
resource_group_name = "${azurerm_resource_group.rg.name}"
loadbalancer_id = "${azurerm_lb.master_lb.id}"
name = "8443Probe"
port = 8443
interval_in_seconds = 5
number_of_probes = 2
protocol = "Tcp"
depends_on = ["azurerm_lb.master_lb"]
}
resource "azurerm_lb_rule" "master_lb" {
resource_group_name = "${azurerm_resource_group.rg.name}"
loadbalancer_id = "${azurerm_lb.master_lb.id}"
name = "OpenShiftAdminConsole"
protocol = "Tcp"
frontend_port = 8443
backend_port = 8443
frontend_ip_configuration_name = "LoadBalancerFrontEnd"
backend_address_pool_id = "${azurerm_lb_backend_address_pool.master_lb.id}"
load_distribution = "SourceIP"
idle_timeout_in_minutes = 30
probe_id = "${azurerm_lb_probe.master_lb.id}"
enable_floating_ip = false
depends_on = ["azurerm_lb_probe.master_lb", "azurerm_lb.master_lb", "azurerm_lb_backend_address_pool.master_lb"]
}
resource "azurerm_lb_nat_rule" "master_lb" {
resource_group_name = "${azurerm_resource_group.rg.name}"
loadbalancer_id = "${azurerm_lb.master_lb.id}"
name = "${azurerm_lb.master_lb.name}-SSH-${count.index}"
protocol = "Tcp"
frontend_port = "${count.index + 2200}"
backend_port = 22
frontend_ip_configuration_name = "LoadBalancerFrontEnd"
count = "${var.master_instance_count}"
depends_on = ["azurerm_lb.master_lb"]
}
# ******* INFRA LOAD BALANCER ***********
resource "azurerm_lb" "infra_lb" {
name = "infraloadbalancer"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
depends_on = ["azurerm_public_ip.infra_lb_pip"]
frontend_ip_configuration {
name = "LoadBalancerFrontEnd"
public_ip_address_id = "${azurerm_public_ip.infra_lb_pip.id}"
}
}
resource "azurerm_lb_backend_address_pool" "infra_lb" {
resource_group_name = "${azurerm_resource_group.rg.name}"
name = "loadBalancerBackEnd"
loadbalancer_id = "${azurerm_lb.infra_lb.id}"
depends_on = ["azurerm_lb.infra_lb"]
}
resource "azurerm_lb_probe" "infra_lb_http_probe" {
resource_group_name = "${azurerm_resource_group.rg.name}"
loadbalancer_id = "${azurerm_lb.infra_lb.id}"
name = "httpProbe"
port = 80
interval_in_seconds = 5
number_of_probes = 2
protocol = "Tcp"
depends_on = ["azurerm_lb.infra_lb"]
}
resource "azurerm_lb_probe" "infra_lb_https_probe" {
resource_group_name = "${azurerm_resource_group.rg.name}"
loadbalancer_id = "${azurerm_lb.infra_lb.id}"
name = "httpsProbe"
port = 443
interval_in_seconds = 5
number_of_probes = 2
protocol = "Tcp"
}
resource "azurerm_lb_rule" "infra_lb_http" {
resource_group_name = "${azurerm_resource_group.rg.name}"
loadbalancer_id = "${azurerm_lb.infra_lb.id}"
name = "OpenShiftRouterHTTP"
protocol = "Tcp"
frontend_port = 80
backend_port = 80
frontend_ip_configuration_name = "LoadBalancerFrontEnd"
backend_address_pool_id = "${azurerm_lb_backend_address_pool.infra_lb.id}"
probe_id = "${azurerm_lb_probe.infra_lb_http_probe.id}"
depends_on = ["azurerm_lb_probe.infra_lb_http_probe", "azurerm_lb.infra_lb", "azurerm_lb_backend_address_pool.infra_lb"]
}
resource "azurerm_lb_rule" "infra_lb_https" {
resource_group_name = "${azurerm_resource_group.rg.name}"
loadbalancer_id = "${azurerm_lb.infra_lb.id}"
name = "OpenShiftRouterHTTPS"
protocol = "Tcp"
frontend_port = 443
backend_port = 443
frontend_ip_configuration_name = "LoadBalancerFrontEnd"
backend_address_pool_id = "${azurerm_lb_backend_address_pool.infra_lb.id}"
probe_id = "${azurerm_lb_probe.infra_lb_https_probe.id}"
depends_on = ["azurerm_lb_probe.infra_lb_https_probe", "azurerm_lb_backend_address_pool.infra_lb"]
}
# ******* NETWORK INTERFACES ***********
resource "azurerm_network_interface" "bastion_nic" {
name = "bastionnic${count.index}"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
network_security_group_id = "${azurerm_network_security_group.master_nsg.id}"
ip_configuration {
name = "bastionip${count.index}"
subnet_id = "${azurerm_subnet.master_subnet.id}"
private_ip_address_allocation = "Dynamic"
public_ip_address_id = "${azurerm_public_ip.bastion_pip.id}"
}
}
resource "azurerm_network_interface" "master_nic" {
name = "masternic${count.index}"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
network_security_group_id = "${azurerm_network_security_group.master_nsg.id}"
count = "${var.master_instance_count}"
ip_configuration {
name = "masterip${count.index}"
subnet_id = "${azurerm_subnet.master_subnet.id}"
private_ip_address_allocation = "Dynamic"
load_balancer_backend_address_pools_ids = ["${azurerm_lb_backend_address_pool.master_lb.id}"]
load_balancer_inbound_nat_rules_ids = ["${element(azurerm_lb_nat_rule.master_lb.*.id, count.index)}"]
}
}
resource "azurerm_network_interface" "infra_nic" {
name = "infra_nic${count.index}"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
network_security_group_id = "${azurerm_network_security_group.infra_nsg.id}"
count = "${var.infra_instance_count}"
ip_configuration {
name = "infraip${count.index}"
subnet_id = "${azurerm_subnet.master_subnet.id}"
private_ip_address_allocation = "Dynamic"
load_balancer_backend_address_pools_ids = ["${azurerm_lb_backend_address_pool.infra_lb.id}"]
}
}
resource "azurerm_network_interface" "node_nic" {
name = "node_nic${count.index}"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
network_security_group_id = "${azurerm_network_security_group.node_nsg.id}"
count = "${var.node_instance_count}"
ip_configuration {
name = "nodeip${count.index}"
subnet_id = "${azurerm_subnet.node_subnet.id}"
private_ip_address_allocation = "Dynamic"
}
}
# ******* Bastion Host *******
resource "azurerm_virtual_machine" "bastion" {
name = "${var.openshift_cluster_prefix}-bastion-1"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
network_interface_ids = ["${azurerm_network_interface.bastion_nic.id}"]
vm_size = "${var.bastion_vm_size}"
delete_os_disk_on_termination = true
delete_data_disks_on_termination = true
tags {
displayName = "${var.openshift_cluster_prefix}-bastion VM Creation"
}
os_profile {
computer_name = "${var.openshift_cluster_prefix}-bastion-${count.index}"
admin_username = "${var.admin_username}"
admin_password = "${var.openshift_password}"
}
os_profile_linux_config {
disable_password_authentication = true
ssh_keys {
path = "/home/${var.admin_username}/.ssh/authorized_keys"
key_data = "${var.ssh_public_key}"
}
}
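# The lookup keys below resolve to "<os_image>_publisher", "<os_image>_offer",
# etc.: join("_publisher", list(var.os_image, "")) joins ["centos", ""] with
# "_publisher" as the separator, yielding "centos_publisher". The same pattern
# is repeated for the master, infra, and node VMs below.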
storage_image_reference {
publisher = "${lookup(var.os_image_map, join("_publisher", list(var.os_image, "")))}"
offer = "${lookup(var.os_image_map, join("_offer", list(var.os_image, "")))}"
sku = "${lookup(var.os_image_map, join("_sku", list(var.os_image, "")))}"
version = "${lookup(var.os_image_map, join("_version", list(var.os_image, "")))}"
}
storage_os_disk {
name = "${var.openshift_cluster_prefix}-bastion-osdisk"
vhd_uri = "${azurerm_storage_account.bastion_storage_account.primary_blob_endpoint}vhds/${var.openshift_cluster_prefix}-bastion-osdisk.vhd"
caching = "ReadWrite"
create_option = "FromImage"
disk_size_gb = 60
}
}
# ******* Master VMs *******
resource "azurerm_virtual_machine" "master" {
name = "${var.openshift_cluster_prefix}-master-${count.index}"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
availability_set_id = "${azurerm_availability_set.master.id}"
network_interface_ids = ["${element(azurerm_network_interface.master_nic.*.id, count.index)}"]
vm_size = "${var.master_vm_size}"
delete_os_disk_on_termination = true
delete_data_disks_on_termination = true
count = "${var.master_instance_count}"
depends_on = ["azurerm_virtual_machine.infra", "azurerm_virtual_machine.node"]
tags {
displayName = "${var.openshift_cluster_prefix}-master VM Creation"
}
connection {
host = "${azurerm_public_ip.openshift_master_pip.fqdn}"
user = "${var.admin_username}"
port = "${count.index + 2200}"
private_key = "${file(var.connection_private_ssh_key_path)}"
}
provisioner "file" {
source = "${var.openshift_script_path}/masterPrep.sh"
destination = "masterPrep.sh"
}
provisioner "file" {
source = "${var.openshift_script_path}/deployOpenShift.sh"
destination = "deployOpenShift.sh"
}
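# masterPrep.sh prepares the master (pointing it at the persistent-volume
# storage account), then deployOpenShift.sh performs the OpenShift
# installation, taking the cluster settings as positional arguments.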
provisioner "remote-exec" {
inline = [
"chmod +x masterPrep.sh",
"chmod +x deployOpenShift.sh",
"sudo bash masterPrep.sh \"${azurerm_storage_account.persistent_volume_storage_account.name}\" \"${var.admin_username}\" && sudo bash deployOpenShift.sh \"${var.admin_username}\" \"${var.openshift_password}\" \"${var.key_vault_secret}\" \"${var.openshift_cluster_prefix}-master\" \"${azurerm_public_ip.openshift_master_pip.fqdn}\" \"${azurerm_public_ip.openshift_master_pip.ip_address}\" \"${var.openshift_cluster_prefix}-infra\" \"${var.openshift_cluster_prefix}-node\" \"${var.node_instance_count}\" \"${var.infra_instance_count}\" \"${var.master_instance_count}\" \"${var.default_sub_domain_type}\" \"${azurerm_storage_account.registry_storage_account.name}\" \"${azurerm_storage_account.registry_storage_account.primary_access_key}\" \"${var.tenant_id}\" \"${var.subscription_id}\" \"${var.aad_client_id}\" \"${var.aad_client_secret}\" \"${azurerm_resource_group.rg.name}\" \"${azurerm_resource_group.rg.location}\" \"${var.key_vault_name}\""
]
}
os_profile {
computer_name = "${var.openshift_cluster_prefix}-master-${count.index}"
admin_username = "${var.admin_username}"
admin_password = "${var.openshift_password}"
}
os_profile_linux_config {
disable_password_authentication = true
ssh_keys {
path = "/home/${var.admin_username}/.ssh/authorized_keys"
key_data = "${var.ssh_public_key}"
}
}
storage_image_reference {
publisher = "${lookup(var.os_image_map, join("_publisher", list(var.os_image, "")))}"
offer = "${lookup(var.os_image_map, join("_offer", list(var.os_image, "")))}"
sku = "${lookup(var.os_image_map, join("_sku", list(var.os_image, "")))}"
version = "${lookup(var.os_image_map, join("_version", list(var.os_image, "")))}"
}
storage_os_disk {
name = "${var.openshift_cluster_prefix}-master-osdisk${count.index}"
vhd_uri = "${azurerm_storage_account.master_storage_account.primary_blob_endpoint}vhds/${var.openshift_cluster_prefix}-master-osdisk${count.index}.vhd"
caching = "ReadWrite"
create_option = "FromImage"
disk_size_gb = 60
}
storage_data_disk {
name = "${var.openshift_cluster_prefix}-master-docker-pool${count.index}"
vhd_uri = "${azurerm_storage_account.master_storage_account.primary_blob_endpoint}vhds/${var.openshift_cluster_prefix}-master-docker-pool${count.index}.vhd"
disk_size_gb = "${var.data_disk_size}"
create_option = "Empty"
lun = 0
}
}
# ******* Infra VMs *******
resource "azurerm_virtual_machine" "infra" {
name = "${var.openshift_cluster_prefix}-infra-${count.index}"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
availability_set_id = "${azurerm_availability_set.infra.id}"
network_interface_ids = ["${element(azurerm_network_interface.infra_nic.*.id, count.index)}"]
vm_size = "${var.infra_vm_size}"
delete_os_disk_on_termination = true
delete_data_disks_on_termination = true
count = "${var.infra_instance_count}"
tags {
displayName = "${var.openshift_cluster_prefix}-infra VM Creation"
}
connection {
type = "ssh"
bastion_host = "${azurerm_public_ip.bastion_pip.fqdn}"
bastion_user = "${var.admin_username}"
bastion_private_key = "${file(var.connection_private_ssh_key_path)}"
host = "${element(azurerm_network_interface.infra_nic.*.private_ip_address, count.index)}"
user = "${var.admin_username}"
private_key = "${file(var.connection_private_ssh_key_path)}"
}
provisioner "file" {
source = "${var.openshift_script_path}/nodePrep.sh"
destination = "nodePrep.sh"
}
provisioner "remote-exec" {
inline = [
"chmod +x nodePrep.sh",
"sudo bash nodePrep.sh",
]
}
os_profile {
computer_name = "${var.openshift_cluster_prefix}-infra-${count.index}"
admin_username = "${var.admin_username}"
admin_password = "${var.openshift_password}"
}
os_profile_linux_config {
disable_password_authentication = true
ssh_keys {
path = "/home/${var.admin_username}/.ssh/authorized_keys"
key_data = "${var.ssh_public_key}"
}
}
storage_image_reference {
publisher = "${lookup(var.os_image_map, join("_publisher", list(var.os_image, "")))}"
offer = "${lookup(var.os_image_map, join("_offer", list(var.os_image, "")))}"
sku = "${lookup(var.os_image_map, join("_sku", list(var.os_image, "")))}"
version = "${lookup(var.os_image_map, join("_version", list(var.os_image, "")))}"
}
storage_os_disk {
name = "${var.openshift_cluster_prefix}-infra-osdisk${count.index}"
vhd_uri = "${azurerm_storage_account.infra_storage_account.primary_blob_endpoint}vhds/${var.openshift_cluster_prefix}-infra-osdisk${count.index}.vhd"
caching = "ReadWrite"
create_option = "FromImage"
}
storage_data_disk {
name = "${var.openshift_cluster_prefix}-infra-docker-pool${count.index}"
vhd_uri = "${azurerm_storage_account.infra_storage_account.primary_blob_endpoint}vhds/${var.openshift_cluster_prefix}-infra-docker-pool${count.index}.vhd"
disk_size_gb = "${var.data_disk_size}"
create_option = "Empty"
lun = 0
}
}
# ******* Node VMs *******
resource "azurerm_virtual_machine" "node" {
name = "${var.openshift_cluster_prefix}-node-${count.index}"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
availability_set_id = "${azurerm_availability_set.node.id}"
network_interface_ids = ["${element(azurerm_network_interface.node_nic.*.id, count.index)}"]
vm_size = "${var.node_vm_size}"
delete_os_disk_on_termination = true
delete_data_disks_on_termination = true
count = "${var.node_instance_count}"
tags {
displayName = "${var.openshift_cluster_prefix}-node VM Creation"
}
connection {
type = "ssh"
bastion_host = "${azurerm_public_ip.bastion_pip.fqdn}"
bastion_user = "${var.admin_username}"
bastion_private_key = "${file(var.connection_private_ssh_key_path)}"
host = "${element(azurerm_network_interface.node_nic.*.private_ip_address, count.index)}"
user = "${var.admin_username}"
private_key = "${file(var.connection_private_ssh_key_path)}"
}
provisioner "file" {
source = "${var.openshift_script_path}/nodePrep.sh"
destination = "nodePrep.sh"
}
provisioner "remote-exec" {
inline = [
"chmod +x nodePrep.sh",
"sudo bash nodePrep.sh",
]
}
os_profile {
computer_name = "${var.openshift_cluster_prefix}-node-${count.index}"
admin_username = "${var.admin_username}"
admin_password = "${var.openshift_password}"
}
os_profile_linux_config {
disable_password_authentication = true
ssh_keys {
path = "/home/${var.admin_username}/.ssh/authorized_keys"
key_data = "${var.ssh_public_key}"
}
}
storage_image_reference {
publisher = "${lookup(var.os_image_map, join("_publisher", list(var.os_image, "")))}"
offer = "${lookup(var.os_image_map, join("_offer", list(var.os_image, "")))}"
sku = "${lookup(var.os_image_map, join("_sku", list(var.os_image, "")))}"
version = "${lookup(var.os_image_map, join("_version", list(var.os_image, "")))}"
}
storage_os_disk {
name = "${var.openshift_cluster_prefix}-node-osdisk${count.index}"
vhd_uri = "${azurerm_storage_account.nodeos_storage_account.primary_blob_endpoint}vhds/${var.openshift_cluster_prefix}-node-osdisk${count.index}.vhd"
caching = "ReadWrite"
create_option = "FromImage"
}
storage_data_disk {
name = "${var.openshift_cluster_prefix}-node-docker-pool${count.index}"
vhd_uri = "${azurerm_storage_account.nodeos_storage_account.primary_blob_endpoint}vhds/${var.openshift_cluster_prefix}-node-docker-pool${count.index}.vhd"
disk_size_gb = "${var.data_disk_size}"
create_option = "Empty"
lun = 0
}
}
# ******* VM EXTENSIONS *******
# resource "azurerm_virtual_machine_extension" "deploy_open_shift_master" {
# name = "masterOpShExt${count.index}"
# location = "${azurerm_resource_group.rg.location}"
# resource_group_name = "${azurerm_resource_group.rg.name}"
# virtual_machine_name = "${element(azurerm_virtual_machine.master.*.name, count.index)}"
# publisher = "Microsoft.Azure.Extensions"
# type = "CustomScript"
# type_handler_version = "2.0"
# auto_upgrade_minor_version = true
# depends_on = ["azurerm_virtual_machine.master", "azurerm_virtual_machine_extension.node_prep", "azurerm_storage_container.vhds", "azurerm_virtual_machine_extension.deploy_infra"]
#
# settings = <<SETTINGS
# {
# "fileUris": [
# "${var.artifacts_location}scripts/masterPrep.sh",
# "${var.artifacts_location}scripts/deployOpenShift.sh"
# ]
# }
# SETTINGS
#
# protected_settings = <<SETTINGS
# {
# "commandToExecute": "bash masterPrep.sh ${azurerm_storage_account.persistent_volume_storage_account.name} ${var.admin_username} && bash deployOpenShift.sh \"${var.admin_username}\" '${var.openshift_password}' \"${var.key_vault_secret}\" \"${var.openshift_cluster_prefix}-master\" \"${azurerm_public_ip.openshift_master_pip.fqdn}\" \"${azurerm_public_ip.openshift_master_pip.ip_address}\" \"${var.openshift_cluster_prefix}-infra\" \"${var.openshift_cluster_prefix}-node\" \"${var.node_instance_count}\" \"${var.infra_instance_count}\" \"${var.master_instance_count}\" \"${var.default_sub_domain_type}\" \"${azurerm_storage_account.registry_storage_account.name}\" \"${azurerm_storage_account.registry_storage_account.primary_access_key}\" \"${var.tenant_id}\" \"${var.subscription_id}\" \"${var.aad_client_id}\" \"${var.aad_client_secret}\" \"${azurerm_resource_group.rg.name}\" \"${azurerm_resource_group.rg.location}\" \"${var.key_vault_name}\""
# }
# SETTINGS
# }
# resource "azurerm_virtual_machine_extension" "deploy_infra" {
# name = "infraOpShExt${count.index}"
# location = "${azurerm_resource_group.rg.location}"
# resource_group_name = "${azurerm_resource_group.rg.name}"
# virtual_machine_name = "${element(azurerm_virtual_machine.infra.*.name, count.index)}"
# publisher = "Microsoft.Azure.Extensions"
# type = "CustomScript"
# type_handler_version = "2.0"
# auto_upgrade_minor_version = true
# depends_on = ["azurerm_virtual_machine.infra"]
#
# settings = <<SETTINGS
# {
# "fileUris": [
# "${var.artifacts_location}scripts/nodePrep.sh"
# ]
# }
# SETTINGS
#
# protected_settings = <<SETTINGS
# {
# "commandToExecute": "bash nodePrep.sh"
# }
# SETTINGS
# }
# resource "azurerm_virtual_machine_extension" "node_prep" {
# name = "nodePrepExt${count.index}"
# location = "${azurerm_resource_group.rg.location}"
# resource_group_name = "${azurerm_resource_group.rg.name}"
# virtual_machine_name = "${element(azurerm_virtual_machine.node.*.name, count.index)}"
# publisher = "Microsoft.Azure.Extensions"
# type = "CustomScript"
# type_handler_version = "2.0"
# auto_upgrade_minor_version = true
# depends_on = ["azurerm_virtual_machine.node", "azurerm_storage_account.nodeos_storage_account"]
#
# settings = <<SETTINGS
# {
# "fileUris": [
# "${var.artifacts_location}scripts/nodePrep.sh"
# ]
# }
# SETTINGS
#
# protected_settings = <<SETTINGS
# {
# "commandToExecute": "bash nodePrep.sh"
# }
# SETTINGS
# }

View File

@ -1,23 +0,0 @@
output "openshift_console_url" {
value = "https://${azurerm_public_ip.openshift_master_pip.fqdn}:8443/console"
}
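# Port 2200 is the load balancer's inbound NAT front-end for master 0;
# the NAT rules map port 2200+N to SSH on master N.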
output "openshift_master_ssh" {
value = "ssh ${var.admin_username}@${azurerm_public_ip.openshift_master_pip.fqdn} -p 2200"
}
output "openshift_infra_load_balancer_fqdn" {
value = "${azurerm_public_ip.infra_lb_pip.fqdn}"
}
output "node_os_storage_account_name" {
value = "${azurerm_storage_account.nodeos_storage_account.name}"
}
output "node_data_storage_account_name" {
value = "${azurerm_storage_account.nodedata_storage_account.name}"
}
output "infra_storage_account_name" {
value = "${azurerm_storage_account.infra_storage_account.name}"
}

View File

@ -1,206 +0,0 @@
variable "resource_group_name" {
description = "Name of the azure resource group in which you will deploy this template."
}
variable "resource_group_location" {
description = "Location of the azure resource group."
default = "southcentralus"
}
variable "subscription_id" {
description = "Subscription ID of the key vault"
}
variable "tenant_id" {
description = "Tenant ID with access to your key vault and subscription"
}
variable "openshift_script_path" {
description = "Local path to openshift scripts to prep nodes and install openshift origin"
}
variable "os_image" {
description = "Select from CentOS (centos) or RHEL (rhel) for the Operating System"
default = "centos"
}
variable "bastion_vm_size" {
description = "Size of the Bastion Virtual Machine. Allowed values: Standard_A4, Standard_A5, Standard_A6, Standard_A7, Standard_A8, Standard_A9, Standard_A10, Standard_A11, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_v2, Standard_D2_v2, Standard_D3_v2, Standard_D4_v2, Standard_D5_v2, Standard_D11_v2, Standard_D12_v2, Standard_D13_v2, Standard_D14_v2, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_D1_v2, Standard_DS2, Standard_DS3, Standard_DS4, Standard_DS11, Standard_DS12, Standard_DS13, Standard_DS14, Standard_DS1_v2, Standard_DS2_v2, Standard_DS3_v2, Standard_DS4_v2, Standard_DS5_v2, Standard_DS11_v2, Standard_DS12_v2, Standard_DS13_v2, Standard_DS14_v2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5"
default = "Standard_D2_v2"
}
variable "master_vm_size" {
description = "Size of the Master Virtual Machine. Allowed values: Standard_A4, Standard_A5, Standard_A6, Standard_A7, Standard_A8, Standard_A9, Standard_A10, Standard_A11, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_v2, Standard_D2_v2, Standard_D3_v2, Standard_D4_v2, Standard_D5_v2, Standard_D11_v2, Standard_D12_v2, Standard_D13_v2, Standard_D14_v2, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_D1_v2, Standard_DS2, Standard_DS3, Standard_DS4, Standard_DS11, Standard_DS12, Standard_DS13, Standard_DS14, Standard_DS1_v2, Standard_DS2_v2, Standard_DS3_v2, Standard_DS4_v2, Standard_DS5_v2, Standard_DS11_v2, Standard_DS12_v2, Standard_DS13_v2, Standard_DS14_v2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5"
default = "Standard_DS4_v2"
}
variable "infra_vm_size" {
description = "Size of the Infra Virtual Machine. Allowed values: Standard_A4, Standard_A5, Standard_A6, Standard_A7, Standard_A8, Standard_A9, Standard_A10, Standard_A11,Standard_D1, Standard_D2, Standard_D3, Standard_D4,Standard_D11, Standard_D12, Standard_D13, Standard_D14,Standard_D1_v2, Standard_D2_v2, Standard_D3_v2, Standard_D4_v2, Standard_D5_v2,Standard_D11_v2, Standard_D12_v2, Standard_D13_v2, Standard_D14_v2,Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5,Standard_D1_v2, Standard_DS2, Standard_DS3, Standard_DS4,Standard_DS11, Standard_DS12, Standard_DS13, Standard_DS14,Standard_DS1_v2, Standard_DS2_v2, Standard_DS3_v2, Standard_DS4_v2, Standard_DS5_v2,Standard_DS11_v2, Standard_DS12_v2, Standard_DS13_v2, Standard_DS14_v2,Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5"
default = "Standard_DS3_v2"
}
variable "node_vm_size" {
description = "Size of the Node Virtual Machine. Allowed values: Standard_A4, Standard_A5, Standard_A6, Standard_A7, Standard_A8, Standard_A9, Standard_A10, Standard_A11, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_v2, Standard_D2_v2, Standard_D3_v2, Standard_D4_v2, Standard_D5_v2, Standard_D11_v2, Standard_D12_v2, Standard_D13_v2, Standard_D14_v2, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_D1_v2, Standard_DS2, Standard_DS3, Standard_DS4, Standard_DS11, Standard_DS12, Standard_DS13, Standard_DS14, Standard_DS1_v2, Standard_DS2_v2, Standard_DS3_v2, Standard_DS4_v2, Standard_DS5_v2, Standard_DS11_v2, Standard_DS12_v2, Standard_DS13_v2, Standard_DS14_v2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5"
default = "Standard_DS3_v2"
}
variable "storage_account_type_map" {
description = "This is the storage account type that you will need based on the vm size that you choose (value constraints)"
type = "map"
default = {
Standard_A4 = "Standard_LRS"
Standard_A5 = "Standard_LRS"
Standard_A6 = "Standard_LRS"
Standard_A7 = "Standard_LRS"
Standard_A8 = "Standard_LRS"
Standard_A9 = "Standard_LRS"
Standard_A10 = "Standard_LRS"
Standard_A11 = "Standard_LRS"
Standard_D1 = "Standard_LRS"
Standard_D2 = "Standard_LRS"
Standard_D3 = "Standard_LRS"
Standard_D4 = "Standard_LRS"
Standard_D11 = "Standard_LRS"
Standard_D12 = "Standard_LRS"
Standard_D13 = "Standard_LRS"
Standard_D14 = "Standard_LRS"
Standard_D1_v2 = "Standard_LRS"
Standard_D2_v2 = "Standard_LRS"
Standard_D3_v2 = "Standard_LRS"
Standard_D4_v2 = "Standard_LRS"
Standard_D5_v2 = "Standard_LRS"
Standard_D11_v2 = "Standard_LRS"
Standard_D12_v2 = "Standard_LRS"
Standard_D13_v2 = "Standard_LRS"
Standard_D14_v2 = "Standard_LRS"
Standard_G1 = "Standard_LRS"
Standard_G2 = "Standard_LRS"
Standard_G3 = "Standard_LRS"
Standard_G4 = "Standard_LRS"
Standard_G5 = "Standard_LRS"
Standard_DS1 = "Premium_LRS"
Standard_DS2 = "Premium_LRS"
Standard_DS3 = "Premium_LRS"
Standard_DS4 = "Premium_LRS"
Standard_DS11 = "Premium_LRS"
Standard_DS12 = "Premium_LRS"
Standard_DS13 = "Premium_LRS"
Standard_DS14 = "Premium_LRS"
Standard_DS1_v2 = "Premium_LRS"
Standard_DS2_v2 = "Premium_LRS"
Standard_DS3_v2 = "Premium_LRS"
Standard_DS4_v2 = "Premium_LRS"
Standard_DS5_v2 = "Premium_LRS"
Standard_DS11_v2 = "Premium_LRS"
Standard_DS12_v2 = "Premium_LRS"
Standard_DS13_v2 = "Premium_LRS"
Standard_DS14_v2 = "Premium_LRS"
Standard_DS15_v2 = "Premium_LRS"
Standard_GS1 = "Premium_LRS"
Standard_GS2 = "Premium_LRS"
Standard_GS3 = "Premium_LRS"
Standard_GS4 = "Premium_LRS"
Standard_GS5 = "Premium_LRS"
}
}
variable "os_image_map" {
description = "os image map"
type = "map"
default = {
centos_publisher = "Openlogic"
centos_offer = "CentOS"
centos_sku = "7.3"
centos_version = "latest"
rhel_publisher = "RedHat"
rhel_offer = "RHEL"
rhel_sku = "7.2"
rhel_version = "latest"
}
}
variable "disk_size_gb" {
description = "storage os disk size"
default = 60
}
variable "openshift_cluster_prefix" {
description = "Cluster Prefix used to configure domain name label and hostnames for all nodes - master, infra and nodes. Between 1 and 20 characters"
}
variable "master_instance_count" {
description = "Number of OpenShift Masters nodes to deploy. 1 is non HA and 3 is for HA."
default = 1
}
variable "infra_instance_count" {
description = "Number of OpenShift infra nodes to deploy. 1 is non HA. Choose 2 or 3 for HA."
default = 1
}
variable "node_instance_count" {
description = "Number of OpenShift nodes to deploy. Allowed values: 1-30"
default = 1
}
variable "data_disk_size" {
description = "Size of data disk to attach to nodes for Docker volume - valid sizes are 128 GB, 512 GB and 1023 GB"
default = 128
}
variable "admin_username" {
description = "Admin username for both OS login and OpenShift login"
default = "ocpadmin"
}
variable "openshift_password" {
description = "Password for OpenShift login"
}
variable "ssh_public_key" {
description = "Path to your SSH Public Key"
}
variable "connection_private_ssh_key_path" {
description = "Path to the private ssh key used to connect to machines within the OpenShift cluster."
}
variable "key_vault_resource_group" {
description = "The name of the Resource Group that contains the Key Vault"
}
variable "key_vault_name" {
description = "The name of the Key Vault you will use"
}
variable "key_vault_secret" {
description = "The Secret Name you used when creating the Secret (that contains the Private Key)"
}
variable "aad_client_id" {
description = "Azure Active Directory Client ID also known as Application ID for Service Principal"
}
variable "aad_client_secret" {
description = "Azure Active Directory Client Secret for Service Principal"
}
variable "default_sub_domain_type" {
description = "This will either be 'xipio' (if you don't have your own domain) or 'custom' if you have your own domain that you would like to use for routing"
default = "xipio"
}
variable "default_sub_domain" {
description = "The wildcard DNS name you would like to use for routing if you selected 'custom' above. If you selected 'xipio' above, then this field will be ignored"
default = "contoso.com"
}
variable "api_version_compute" {
default = "2015-06-15"
}
variable "api_version" {
default = "2015-01-01"
}

View File

@ -1,28 +0,0 @@
# Azure Search service
This Terraform template was based on [this](https://github.com/Azure/azure-quickstart-templates/tree/bf842409eeeeb7c4523add3922b204793eb4d85f/101-azure-search-create) Azure Quickstart Template. Changes to the ARM template that may have occurred since the creation of this example may not be reflected in this Terraform template.
This template creates a new Azure Search Service.
If you are unclear as to what parameters are allowed, you can check the [Azure Search Management REST API docs on MSDN](https://msdn.microsoft.com/en-us/library/azure/dn832687.aspx).
## main.tf
The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
## outputs.tf
This data is output when `terraform apply` is called, and can be queried using the `terraform output` command.
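For example, after `terraform apply` completes you can read the output defined in `outputs.tf` (a minimal sketch; the name is quoted because it contains spaces):

```bash
# Print the value of a single named output
terraform output "Azure Search Service"

# Or print every output at once
terraform output
```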
## provider.tf
You may leave the provider block in the `main.tf`, as it is in this template, or you can create a file called `provider.tf` and add it to your `.gitignore` file.
Azure requires that an application be added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create these credentials and populate your `provider.tf` file.
## terraform.tfvars
If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
If you are committing this template to source control, please ensure that you add this file to your `.gitignore` file.
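As a minimal sketch (the values below are placeholders), a `terraform.tfvars` file in the working directory is loaded automatically:

```bash
# Terraform auto-loads terraform.tfvars from the current directory
cat > terraform.tfvars <<'EOF'
resource_group = "my-search-rg"
search_name    = "mysearchservice"
EOF

terraform plan
```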
## variables.tf
The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
![graph](/examples/azure-search-create/graph.png)

View File

@ -1,36 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
docker run --rm -it \
-e ARM_CLIENT_ID \
-e ARM_CLIENT_SECRET \
-e ARM_SUBSCRIPTION_ID \
-e ARM_TENANT_ID \
-v $(pwd):/data \
--workdir=/data \
--entrypoint "/bin/sh" \
hashicorp/terraform:light \
-c "/bin/terraform get; \
/bin/terraform validate; \
/bin/terraform plan -out=out.tfplan -var search_name=$KEY -var resource_group=$KEY; \
/bin/terraform apply out.tfplan; \
/bin/terraform show;"
# cleanup deployed azure resources via azure-cli
docker run --rm -it \
azuresdk/azure-cli-python:0.2.10 \
sh -c "az login --service-principal -u $ARM_CLIENT_ID -p $ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID > /dev/null; \
az resource list --name $KEY"
# cleanup deployed azure resources via terraform
docker run --rm -it \
-e ARM_CLIENT_ID \
-e ARM_CLIENT_SECRET \
-e ARM_SUBSCRIPTION_ID \
-e ARM_TENANT_ID \
-v $(pwd):/data \
--workdir=/data \
--entrypoint "/bin/sh" \
hashicorp/terraform:light \
-c "/bin/terraform destroy -force -var search_name=$KEY -var resource_group=$KEY;"

View File

@ -1,15 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
if docker -v; then
# generate a unique string for CI deployment
export KEY=$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'a-z' | head -c 12)
export PASSWORD=$KEY$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'A-Z' | head -c 2)$(cat /dev/urandom | env LC_CTYPE=C tr -cd '0-9' | head -c 2)
/bin/sh ./deploy.ci.sh
else
echo "Docker is used to run terraform commands, please install before run: https://docs.docker.com/docker-for-mac/install/"
fi

Binary file not shown (graph.png, 76 KiB)

View File

@ -1,20 +0,0 @@
# provider "azurerm" {
# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID"
# client_id = "REPLACE-WITH-YOUR-CLIENT-ID"
# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET"
# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID"
# }
resource "azurerm_resource_group" "rg" {
name = "${var.resource_group}"
location = "${var.location}"
}
resource "azurerm_search_service" "search" {
name = "${var.search_name}"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${var.location}"
sku = "${var.sku}"
replica_count = "${var.replica_count}"
partition_count = "${var.partition_count}"
}

View File

@ -1,3 +0,0 @@
output "Azure Search Service" {
value = "${azurerm_search_service.search.name}"
}

View File

@ -1,32 +0,0 @@
variable "resource_group" {
description = "The name of the resource group in which to create search service"
}
variable "location" {
description = "The location/region where the search service is created. Changing this forces a new resource to be created."
default = "southcentralus"
}
variable "search_name" {
description = "Service name must only contain lowercase letters, digits or dashes, cannot use dash as the first two or last one characters, cannot contain consecutive dashes, and is limited between 2 and 60 characters in length."
}
variable "sku" {
description = "Valid values are 'free', 'standard', 'standard2', and 'standard3' (2 & 3 must be enabled on the backend by Microsoft support). 'free' provisions the service in shared clusters. 'standard' provisions the service in dedicated clusters."
default = "standard"
}
variable "replica_count" {
description = "Replicas distribute search workloads across the service. You need 2 or more to support high availability (applies to Basic and Standard only)."
default = 1
}
variable "partition_count" {
description = "Partitions allow for scaling of document count as well as faster indexing by sharding your index over multiple Azure Search units. Allowed values: 1, 2, 3, 4, 6, 12"
default = 1
}
variable "hosting_mode" {
description = "Applicable only for SKU set to standard3. You can set this property to enable a single, high density partition that allows up to 1000 indexes, which is much higher than the maximum indexes allowed for any other SKU. Allowed values: default, highDensity"
default = "default"
}

View File

@ -1,22 +0,0 @@
# 201 Create a Service Bus with Topic and Subscription
For information about using this template, see [Create a Service Bus namespace with Topic and Subscription using an ARM template](http://azure.microsoft.com/documentation/articles/service-bus-resource-manager-namespace-topic/).
## main.tf
The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
## outputs.tf
This data is output when `terraform apply` is called, and can be queried using the `terraform output` command.
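For instance, after `terraform apply` you can retrieve the connection string defined in `outputs.tf` (quoted because the output name contains spaces):

```bash
terraform output "Namespace Connection String"
```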
## provider.tf
Azure requires that an application be added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create these credentials and populate your `provider.tf` file.
## terraform.tfvars
If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
If you are committing this template to source control, please ensure that you add this file to your `.gitignore` file.
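A short sketch of the `-var-file` approach with this template's variables (`secret.tfvars` is a hypothetical file name, and the values are placeholders):

```bash
cat > secret.tfvars <<'EOF'
resource_group = "my-servicebus-rg"
unique         = "mydemo123"
EOF

# Files without the .auto.tfvars extension are not auto-loaded,
# so pass the file explicitly
terraform plan -var-file=secret.tfvars
```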
## variables.tf
The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
![graph](/examples/azure-servicebus-create-topic-and-subscription/graph.png)

View File

@ -1,31 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
docker run --rm -it \
-e ARM_CLIENT_ID \
-e ARM_CLIENT_SECRET \
-e ARM_SUBSCRIPTION_ID \
-e ARM_TENANT_ID \
-v $(pwd):/data \
--workdir=/data \
--entrypoint "/bin/sh" \
hashicorp/terraform:light \
-c "/bin/terraform get; \
/bin/terraform validate; \
/bin/terraform plan -out=out.tfplan -var unique=$KEY -var resource_group=$KEY; \
/bin/terraform apply out.tfplan; \
/bin/terraform show;"
# cleanup deployed azure resources via terraform
docker run --rm -it \
-e ARM_CLIENT_ID \
-e ARM_CLIENT_SECRET \
-e ARM_SUBSCRIPTION_ID \
-e ARM_TENANT_ID \
-v $(pwd):/data \
--workdir=/data \
--entrypoint "/bin/sh" \
hashicorp/terraform:light \
-c "/bin/terraform destroy -force -var unique=$KEY -var resource_group=$KEY;"

View File

@ -1,15 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
if docker -v; then
# generate a unique string for CI deployment
export KEY=$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'a-z' | head -c 12)
export PASSWORD=$KEY$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'A-Z' | head -c 2)$(cat /dev/urandom | env LC_CTYPE=C tr -cd '0-9' | head -c 2)
/bin/sh ./deploy.ci.sh
else
echo "Docker is used to run terraform commands, please install before run: https://docs.docker.com/docker-for-mac/install/"
fi

Binary file not shown (graph.png, 74 KiB)

View File

@ -1,37 +0,0 @@
# provider "azurerm" {
# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID"
# client_id = "REPLACE-WITH-YOUR-CLIENT-ID"
# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET"
# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID"
# }
resource "azurerm_resource_group" "rg" {
name = "${var.resource_group}"
location = "${var.location}"
}
resource "azurerm_servicebus_namespace" "test" {
depends_on = ["azurerm_resource_group.rg"]
name = "${var.unique}servicebus"
location = "${var.location}"
resource_group_name = "${var.resource_group}"
sku = "standard"
}
resource "azurerm_servicebus_topic" "test" {
name = "${var.unique}Topic"
location = "${var.location}"
resource_group_name = "${var.resource_group}"
namespace_name = "${azurerm_servicebus_namespace.test.name}"
enable_partitioning = true
}
resource "azurerm_servicebus_subscription" "test" {
name = "${var.unique}Subscription"
location = "${var.location}"
resource_group_name = "${var.resource_group}"
namespace_name = "${azurerm_servicebus_namespace.test.name}"
topic_name = "${azurerm_servicebus_topic.test.name}"
max_delivery_count = 1
}

View File

@ -1,7 +0,0 @@
output "Namespace Connection String" {
value = "${azurerm_servicebus_namespace.test.default_primary_connection_string}"
}
output "Shared Access Policy PrimaryKey" {
value = "${azurerm_servicebus_namespace.test.default_primary_key}"
}

View File

@ -1,12 +0,0 @@
variable "resource_group" {
description = "The name of the resource group in which to create the Service Bus"
}
variable "location" {
description = "The location/region where the Service Bus is created. Changing this forces a new resource to be created."
default = "southcentralus"
}
variable "unique" {
description = "a unique string that will be used to comprise the names of the Service Bus, Topic, and Subscription name spaces"
}

View File

@ -1,67 +0,0 @@
# Spark & Cassandra on CentOS 7.x
This Terraform template was based on [this](https://github.com/Azure/azure-quickstart-templates/tree/master/spark-and-cassandra-on-centos) Azure Quickstart Template. Changes to the ARM template that may have occurred since the creation of this example may not be reflected here.
This project configures a Spark cluster (1 master and n slave nodes) and a single-node Cassandra instance on Azure using CentOS 7.x. The base image starts with CentOS 7.3 and is updated to the latest point release as part of the provisioning steps.
Please note that Azure Resource Manager is used to provision the environment.
### Software ###
| Category | Software | Version | Notes |
| --- | --- | --- | --- |
| Operating System | CentOS | 7.x | Based on CentOS 7.3 but auto-upgraded to the latest point release |
| Java | OpenJDK | 1.8.0 | Installed on all servers |
| Spark | Spark | 1.6.0 with Hadoop 2.6 | The installation contains libraries needed for Hadoop 2.6 |
| Cassandra | Cassandra | 3.2 | Installed through DataStax's YUM repository |
### Defaults ###
| Component | Setting | Default | Notes |
| --- | --- | --- | --- |
| Spark - Master | VM Size | Standard D1 V2 | |
| Spark - Master | Storage | Standard LRS | |
| Spark - Master | Internal IP | 10.0.0.5 | |
| Spark - Master | Service User Account | spark | Password-less access |
| | | | |
| Spark - Slave | VM Size | Standard D3 V2 | |
| Spark - Slave | Storage | Standard LRS | |
| Spark - Slave | Internal IP Range | 10.0.1.5 - 10.0.1.255 | |
| Spark - Slave | # of Nodes | 2 | Maximum of 200 |
| Spark - Slave | Availability | 2 fault domains, 5 update domains | |
| Spark - Slave | Service User Account | spark | Password-less access |
| | | | |
| Cassandra | VM Size | Standard D3 V2 | |
| Cassandra | Storage | Standard LRS | |
| Cassandra | Internal IP | 10.0.2.5 | |
| Cassandra | Service User Account | cassandra | Password-less access |
## Prerequisites
1. Ensure you have an Azure subscription.
2. Ensure you have enough available vCPU cores in your subscription; otherwise, you will receive an error during deployment. The number of cores can be increased through a support ticket in the Azure Portal.
## main.tf
The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
## outputs.tf
This data is output when `terraform apply` is called, and can be queried using the `terraform output` command.
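For example, the outputs defined in `outputs.tf` give you the SSH command and the Spark Web UI address directly:

```bash
terraform output master_ssh_command
terraform output master_web_ui_public_ip
```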
## provider.tf
Azure requires that an application be added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create these credentials and populate your `provider.tf` file.
## terraform.tfvars
If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
If you are committing this template to source control, please ensure that you add this file to your `.gitignore` file.
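Since this template requires an admin username and password, a local secrets file with the `.auto.tfvars` extension is one way to keep them out of the main configuration; a sketch with placeholder values:

```bash
cat > secrets.auto.tfvars <<'EOF'
vm_admin_username = "sparkadmin"
vm_admin_password = "REPLACE-WITH-A-COMPLEX-PASSWORD"
EOF

# Keep the secrets file out of source control
echo "secrets.auto.tfvars" >> .gitignore

terraform plan -var resource_group=my-spark-rg -var unique_prefix=mydemo
```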
## variables.tf
The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
## Post-Deployment
1. All servers will have a public IP and SSH port enabled by default. These can be disabled or modified in the template or through the Azure Portal.
2. All servers are configured with the same username and password. You may SSH into each server and ensure connectivity.
3. The Spark Web UI runs on **port 8080**. Access it at MASTER_WEB_UI_PUBLIC_IP:8080 in your browser. The public IP is available in the outputs as well as through the Azure Portal.
4. Delete the Resource Group that was created to stage the provisioning scripts.

View File

@ -1,43 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
docker run --rm -it \
-e ARM_CLIENT_ID \
-e ARM_CLIENT_SECRET \
-e ARM_SUBSCRIPTION_ID \
-e ARM_TENANT_ID \
-v $(pwd):/data \
--workdir=/data \
--entrypoint "/bin/sh" \
hashicorp/terraform:light \
-c "/bin/terraform get; \
/bin/terraform validate; \
/bin/terraform plan -out=out.tfplan \
-var resource_group=$KEY \
-var unique_prefix=$KEY \
-var vm_admin_username=$KEY \
-var vm_admin_password=$PASSWORD; \
/bin/terraform apply out.tfplan"
# cleanup deployed azure resources via azure-cli
docker run --rm -it \
azuresdk/azure-cli-python:0.2.10 \
sh -c "az login --service-principal -u $ARM_CLIENT_ID -p $ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID > /dev/null; \
az vm list -g $KEY"
# cleanup deployed azure resources via terraform
docker run --rm -it \
-e ARM_CLIENT_ID \
-e ARM_CLIENT_SECRET \
-e ARM_SUBSCRIPTION_ID \
-e ARM_TENANT_ID \
-v $(pwd):/data \
--workdir=/data \
--entrypoint "/bin/sh" \
hashicorp/terraform:light \
-c "/bin/terraform destroy -force \
-var resource_group=$KEY \
-var unique_prefix=$KEY \
-var vm_admin_username=$KEY \
-var vm_admin_password=$PASSWORD;"

View File

@ -1,15 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
if docker -v; then
# generate a unique string for CI deployment
export KEY=$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'a-z' | head -c 12)
export PASSWORD=$KEY$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'A-Z' | head -c 2)$(cat /dev/urandom | env LC_CTYPE=C tr -cd '0-9' | head -c 2)
/bin/sh ./deploy.ci.sh
else
echo "Docker is used to run terraform commands, please install before run: https://docs.docker.com/docker-for-mac/install/"
fi

View File

@ -1,405 +0,0 @@
# provider "azurerm" {
# subscription_id = "${var.subscription_id}"
# client_id = "${var.client_id}"
# client_secret = "${var.client_secret}"
# tenant_id = "${var.tenant_id}"
# }
resource "azurerm_resource_group" "rg" {
name = "${var.resource_group}"
location = "${var.location}"
}
# ********************** NETWORK SECURITY GROUPS ********************** #
resource "azurerm_network_security_group" "master" {
name = "${var.nsg_spark_master_name}"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
security_rule {
name = "ssh"
description = "Allow SSH"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "Internet"
destination_address_prefix = "*"
}
security_rule {
name = "http_webui_spark"
description = "Allow Web UI Access to Spark"
priority = 101
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "8080"
source_address_prefix = "Internet"
destination_address_prefix = "*"
}
security_rule {
name = "http_rest_spark"
description = "Allow REST API Access to Spark"
priority = 102
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "6066"
source_address_prefix = "Internet"
destination_address_prefix = "*"
}
}
resource "azurerm_network_security_group" "slave" {
name = "${var.nsg_spark_slave_name}"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
security_rule {
name = "ssh"
description = "Allow SSH"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "Internet"
destination_address_prefix = "*"
}
}
resource "azurerm_network_security_group" "cassandra" {
name = "${var.nsg_cassandra_name}"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
security_rule {
name = "ssh"
description = "Allow SSH"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "Internet"
destination_address_prefix = "*"
}
}
# ********************** VNET / SUBNETS ********************** #
resource "azurerm_virtual_network" "spark" {
name = "vnet-spark"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
address_space = ["${var.vnet_spark_prefix}"]
}
resource "azurerm_subnet" "subnet1" {
name = "${var.vnet_spark_subnet1_name}"
virtual_network_name = "${azurerm_virtual_network.spark.name}"
resource_group_name = "${azurerm_resource_group.rg.name}"
address_prefix = "${var.vnet_spark_subnet1_prefix}"
network_security_group_id = "${azurerm_network_security_group.master.id}"
depends_on = ["azurerm_virtual_network.spark"]
}
resource "azurerm_subnet" "subnet2" {
name = "${var.vnet_spark_subnet2_name}"
virtual_network_name = "${azurerm_virtual_network.spark.name}"
resource_group_name = "${azurerm_resource_group.rg.name}"
address_prefix = "${var.vnet_spark_subnet2_prefix}"
}
resource "azurerm_subnet" "subnet3" {
name = "${var.vnet_spark_subnet3_name}"
virtual_network_name = "${azurerm_virtual_network.spark.name}"
resource_group_name = "${azurerm_resource_group.rg.name}"
address_prefix = "${var.vnet_spark_subnet3_prefix}"
}
# ********************** PUBLIC IP ADDRESSES ********************** #
resource "azurerm_public_ip" "master" {
name = "${var.public_ip_master_name}"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
public_ip_address_allocation = "Static"
}
resource "azurerm_public_ip" "slave" {
name = "${var.public_ip_slave_name_prefix}${count.index}"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
public_ip_address_allocation = "Static"
count = "${var.vm_number_of_slaves}"
}
resource "azurerm_public_ip" "cassandra" {
name = "${var.public_ip_cassandra_name}"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
public_ip_address_allocation = "Static"
}
# ********************** NETWORK INTERFACE ********************** #
resource "azurerm_network_interface" "master" {
name = "${var.nic_master_name}"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
network_security_group_id = "${azurerm_network_security_group.master.id}"
depends_on = ["azurerm_virtual_network.spark", "azurerm_public_ip.master", "azurerm_network_security_group.master"]
ip_configuration {
name = "ipconfig1"
subnet_id = "${azurerm_subnet.subnet1.id}"
private_ip_address_allocation = "Static"
private_ip_address = "${var.nic_master_node_ip}"
public_ip_address_id = "${azurerm_public_ip.master.id}"
}
}
resource "azurerm_network_interface" "slave" {
name = "${var.nic_slave_name_prefix}${count.index}"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
network_security_group_id = "${azurerm_network_security_group.slave.id}"
count = "${var.vm_number_of_slaves}"
depends_on = ["azurerm_virtual_network.spark", "azurerm_public_ip.slave", "azurerm_network_security_group.slave"]
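# Each slave gets a consecutive static private IP: prefix + (5 + index),
# i.e. 10.0.1.5, 10.0.1.6, ... with the default nic_slave_node_ip_prefix.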
ip_configuration {
name = "ipconfig1"
subnet_id = "${azurerm_subnet.subnet2.id}"
private_ip_address_allocation = "Static"
private_ip_address = "${var.nic_slave_node_ip_prefix}${5 + count.index}"
public_ip_address_id = "${element(azurerm_public_ip.slave.*.id, count.index)}"
}
}
resource "azurerm_network_interface" "cassandra" {
name = "${var.nic_cassandra_name}"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
network_security_group_id = "${azurerm_network_security_group.cassandra.id}"
depends_on = ["azurerm_virtual_network.spark", "azurerm_public_ip.cassandra", "azurerm_network_security_group.cassandra"]
ip_configuration {
name = "ipconfig1"
subnet_id = "${azurerm_subnet.subnet3.id}"
private_ip_address_allocation = "Static"
private_ip_address = "${var.nic_cassandra_node_ip}"
public_ip_address_id = "${azurerm_public_ip.cassandra.id}"
}
}
# ********************** AVAILABILITY SET ********************** #
resource "azurerm_availability_set" "slave" {
name = "${var.availability_slave_name}"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
platform_update_domain_count = 5
platform_fault_domain_count = 2
}
# ********************** STORAGE ACCOUNTS ********************** #
resource "azurerm_storage_account" "master" {
name = "master${var.unique_prefix}"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
account_type = "${var.storage_master_type}"
}
resource "azurerm_storage_container" "master" {
name = "${var.vm_master_storage_account_container_name}"
resource_group_name = "${azurerm_resource_group.rg.name}"
storage_account_name = "${azurerm_storage_account.master.name}"
container_access_type = "private"
depends_on = ["azurerm_storage_account.master"]
}
resource "azurerm_storage_account" "slave" {
name = "slave${var.unique_prefix}${count.index}"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
count = "${var.vm_number_of_slaves}"
account_type = "${var.storage_slave_type}"
}
resource "azurerm_storage_container" "slave" {
name = "${var.vm_slave_storage_account_container_name}${count.index}"
resource_group_name = "${azurerm_resource_group.rg.name}"
storage_account_name = "${element(azurerm_storage_account.slave.*.name, count.index)}"
container_access_type = "private"
depends_on = ["azurerm_storage_account.slave"]
}
resource "azurerm_storage_account" "cassandra" {
name = "cassandra${var.unique_prefix}"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
account_type = "${var.storage_cassandra_type}"
}
resource "azurerm_storage_container" "cassandra" {
name = "${var.vm_cassandra_storage_account_container_name}"
resource_group_name = "${azurerm_resource_group.rg.name}"
storage_account_name = "${azurerm_storage_account.cassandra.name}"
container_access_type = "private"
depends_on = ["azurerm_storage_account.cassandra"]
}
# ********************** MASTER VIRTUAL MACHINE ********************** #
resource "azurerm_virtual_machine" "master" {
name = "${var.vm_master_name}"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
vm_size = "${var.vm_master_vm_size}"
network_interface_ids = ["${azurerm_network_interface.master.id}"]
depends_on = ["azurerm_storage_account.master", "azurerm_network_interface.master", "azurerm_storage_container.master"]
storage_image_reference {
publisher = "${var.os_image_publisher}"
offer = "${var.os_image_offer}"
sku = "${var.os_version}"
version = "latest"
}
storage_os_disk {
name = "${var.vm_master_os_disk_name}"
vhd_uri = "http://${azurerm_storage_account.master.name}.blob.core.windows.net/${azurerm_storage_container.master.name}/${var.vm_master_os_disk_name}.vhd"
create_option = "FromImage"
caching = "ReadWrite"
}
os_profile {
computer_name = "${var.vm_master_name}"
admin_username = "${var.vm_admin_username}"
admin_password = "${var.vm_admin_password}"
}
os_profile_linux_config {
disable_password_authentication = false
}
connection {
type = "ssh"
host = "${azurerm_public_ip.master.ip_address}"
user = "${var.vm_admin_username}"
password = "${var.vm_admin_password}"
}
provisioner "remote-exec" {
inline = [
"wget ${var.artifacts_location}${var.script_spark_provisioner_script_file_name}",
"echo ${var.vm_admin_password} | sudo -S sh ./${var.script_spark_provisioner_script_file_name} -runas=master -master=${var.nic_master_node_ip}",
]
}
}
# ********************** SLAVE VIRTUAL MACHINES ********************** #
resource "azurerm_virtual_machine" "slave" {
name = "${var.vm_slave_name_prefix}${count.index}"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
vm_size = "${var.vm_slave_vm_size}"
network_interface_ids = ["${element(azurerm_network_interface.slave.*.id, count.index)}"]
count = "${var.vm_number_of_slaves}"
availability_set_id = "${azurerm_availability_set.slave.id}"
depends_on = ["azurerm_storage_account.slave", "azurerm_network_interface.slave", "azurerm_storage_container.slave"]
storage_image_reference {
publisher = "${var.os_image_publisher}"
offer = "${var.os_image_offer}"
sku = "${var.os_version}"
version = "latest"
}
storage_os_disk {
name = "${var.vm_slave_os_disk_name_prefix}${count.index}"
vhd_uri = "http://${element(azurerm_storage_account.slave.*.name, count.index)}.blob.core.windows.net/${element(azurerm_storage_container.slave.*.name, count.index)}/${var.vm_slave_os_disk_name_prefix}.vhd"
create_option = "FromImage"
caching = "ReadWrite"
}
os_profile {
computer_name = "${var.vm_slave_name_prefix}${count.index}"
admin_username = "${var.vm_admin_username}"
admin_password = "${var.vm_admin_password}"
}
os_profile_linux_config {
disable_password_authentication = false
}
connection {
type = "ssh"
host = "${element(azurerm_public_ip.slave.*.ip_address, count.index)}"
user = "${var.vm_admin_username}"
password = "${var.vm_admin_password}"
}
provisioner "remote-exec" {
inline = [
"wget ${var.artifacts_location}${var.script_spark_provisioner_script_file_name}",
"echo ${var.vm_admin_password} | sudo -S sh ./${var.script_spark_provisioner_script_file_name} -runas=slave -master=${var.nic_master_node_ip}",
]
}
}
# ********************** CASSANDRA VIRTUAL MACHINE ********************** #
resource "azurerm_virtual_machine" "cassandra" {
name = "${var.vm_cassandra_name}"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
vm_size = "${var.vm_cassandra_vm_size}"
network_interface_ids = ["${azurerm_network_interface.cassandra.id}"]
depends_on = ["azurerm_storage_account.cassandra", "azurerm_network_interface.cassandra", "azurerm_storage_container.cassandra"]
storage_image_reference {
publisher = "${var.os_image_publisher}"
offer = "${var.os_image_offer}"
sku = "${var.os_version}"
version = "latest"
}
storage_os_disk {
name = "${var.vm_cassandra_os_disk_name}"
vhd_uri = "http://${azurerm_storage_account.cassandra.name}.blob.core.windows.net/${azurerm_storage_container.cassandra.name}/${var.vm_cassandra_os_disk_name}.vhd"
create_option = "FromImage"
caching = "ReadWrite"
}
os_profile {
computer_name = "${var.vm_cassandra_name}"
admin_username = "${var.vm_admin_username}"
admin_password = "${var.vm_admin_password}"
}
os_profile_linux_config {
disable_password_authentication = false
}
connection {
type = "ssh"
host = "${azurerm_public_ip.cassandra.ip_address}"
user = "${var.vm_admin_username}"
password = "${var.vm_admin_password}"
}
provisioner "remote-exec" {
inline = [
"wget ${var.artifacts_location}${var.script_cassandra_provisioner_script_file_name}",
"echo ${var.vm_admin_password} | sudo -S sh ./${var.script_cassandra_provisioner_script_file_name}",
]
}
}

View File

@ -1,15 +0,0 @@
output "resource_group" {
value = "${var.resource_group}"
}
output "master_ip_address" {
value = "${azurerm_public_ip.master.ip_address}"
}
output "master_ssh_command" {
value = "ssh ${var.vm_admin_username}@${azurerm_public_ip.master.ip_address}"
}
output "master_web_ui_public_ip" {
value = "${azurerm_public_ip.master.ip_address}:8080"
}

View File

@ -1,234 +0,0 @@
variable "resource_group" {
description = "Resource group name into which your Spark and Cassandra deployment will go."
}
variable "location" {
description = "The location/region where the virtual network is created. Changing this forces a new resource to be created."
default = "southcentralus"
}
variable "unique_prefix" {
description = "This prefix is used for names which need to be globally unique."
}
variable "storage_master_type" {
description = "Storage type that is used for master Spark node. This storage account is used to store VM disks. Allowed values: Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS"
default = "Standard_LRS"
}
variable "storage_slave_type" {
description = "Storage type that is used for each of the slave Spark node. This storage account is used to store VM disks. Allowed values : Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS"
default = "Standard_LRS"
}
variable "storage_cassandra_type" {
description = "Storage type that is used for Cassandra. This storage account is used to store VM disks. Allowed values: Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS"
default = "Standard_LRS"
}
variable "vm_master_vm_size" {
description = "VM size for master Spark node. This VM can be sized smaller. Allowed values: Standard_D1_v2, Standard_D2_v2, Standard_D3_v2, Standard_D4_v2, Standard_D5_v2, Standard_D11_v2, Standard_D12_v2, Standard_D13_v2, Standard_D14_v2, Standard_A8, Standard_A9, Standard_A10, Standard_A11"
default = "Standard_D1_v2"
}
variable "vm_number_of_slaves" {
description = "Number of VMs to create to support the slaves. Each slave is created on it's own VM. Minimum of 2 & Maximum of 200 VMs. min = 2, max = 200"
default = 2
}
variable "vm_slave_vm_size" {
description = "VM size for slave Spark nodes. This VM should be sized based on workloads. Allowed values: Standard_D1_v2, Standard_D2_v2, Standard_D3_v2, Standard_D4_v2, Standard_D5_v2, Standard_D11_v2, Standard_D12_v2, Standard_D13_v2, Standard_D14_v2, Standard_A8, Standard_A9, Standard_A10, Standard_A11"
default = "Standard_D3_v2"
}
variable "vm_cassandra_vm_size" {
description = "VM size for Cassandra node. This VM should be sized based on workloads. Allowed values: Standard_D1_v2, Standard_D2_v2, Standard_D3_v2, Standard_D4_v2, Standard_D5_v2, Standard_D11_v2, Standard_D12_v2, Standard_D13_v2, Standard_D14_v2, Standard_A8, Standard_A9, Standard_A10, Standard_A11"
default = "Standard_D3_v2"
}
variable "vm_admin_username" {
description = "Specify an admin username that should be used to login to the VM. Min length: 1"
}
variable "vm_admin_password" {
description = "Specify an admin password that should be used to login to the VM. Must be between 6-72 characters long and must satisfy at least 3 of password complexity requirements from the following: 1) Contains an uppercase character 2) Contains a lowercase character 3) Contains a numeric digit 4) Contains a special character"
}
variable "os_image_publisher" {
description = "name of the publisher of the image (az vm image list)"
default = "OpenLogic"
}
variable "os_image_offer" {
description = "the name of the offer (az vm image list)"
default = "CentOS"
}
variable "os_version" {
description = "version of the image to apply (az vm image list)"
default = "7.3"
}
variable "api_version" {
default = "2015-06-15"
}
variable "artifacts_location" {
description = "The base URI where artifacts required by this template are located."
default = "https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/spark-and-cassandra-on-centos/CustomScripts/"
}
variable "vnet_spark_prefix" {
description = "The address space that is used by the virtual network. You can supply more than one address space. Changing this forces a new resource to be created."
default = "10.0.0.0/16"
}
variable "vnet_spark_subnet1_name" {
description = "The name used for the Master subnet."
default = "Subnet-Master"
}
variable "vnet_spark_subnet1_prefix" {
description = "The address prefix to use for the Master subnet."
default = "10.0.0.0/24"
}
variable "vnet_spark_subnet2_name" {
description = "The name used for the slave/agent subnet."
default = "Subnet-Slave"
}
variable "vnet_spark_subnet2_prefix" {
description = "The address prefix to use for the slave/agent subnet."
default = "10.0.1.0/24"
}
variable "vnet_spark_subnet3_name" {
description = "The name used for the subnet used by Cassandra."
default = "Subnet-Cassandra"
}
variable "vnet_spark_subnet3_prefix" {
description = "The address prefix to use for the subnet used by Cassandra."
default = "10.0.2.0/24"
}
variable "nsg_spark_master_name" {
description = "The name of the network security group for Spark's Master"
default = "nsg-spark-master"
}
variable "nsg_spark_slave_name" {
description = "The name of the network security group for Spark's slave/agent nodes"
default = "nsg-spark-slave"
}
variable "nsg_cassandra_name" {
description = "The name of the network security group for Cassandra"
default = "nsg-cassandra"
}
variable "nic_master_name" {
description = "The name of the network interface card for Master"
default = "nic-master"
}
variable "nic_master_node_ip" {
description = "The private IP address used by the Master's network interface card"
default = "10.0.0.5"
}
variable "nic_cassandra_name" {
description = "The name of the network interface card used by Cassandra"
default = "nic-cassandra"
}
variable "nic_cassandra_node_ip" {
description = "The private IP address of Cassandra's network interface card"
default = "10.0.2.5"
}
variable "nic_slave_name_prefix" {
description = "The prefix used to constitute the slave/agents' names"
default = "nic-slave-"
}
variable "nic_slave_node_ip_prefix" {
description = "The prefix of the private IP address used by the network interface card of the slave/agent nodes"
default = "10.0.1."
}
variable "public_ip_master_name" {
description = "The name of the master node's public IP address"
default = "public-ip-master"
}
variable "public_ip_slave_name_prefix" {
description = "The prefix to the slave/agent nodes' IP address names"
default = "public-ip-slave-"
}
variable "public_ip_cassandra_name" {
description = "The name of Cassandra's node's public IP address"
default = "public-ip-cassandra"
}
variable "vm_master_name" {
description = "The name of Spark's Master virtual machine"
default = "spark-master"
}
variable "vm_master_os_disk_name" {
description = "The name of the os disk used by Spark's Master virtual machine"
default = "vmMasterOSDisk"
}
variable "vm_master_storage_account_container_name" {
description = "The name of the storage account container used by Spark's master"
default = "vhds"
}
variable "vm_slave_name_prefix" {
description = "The name prefix used by Spark's slave/agent nodes"
default = "spark-slave-"
}
variable "vm_slave_os_disk_name_prefix" {
description = "The prefix used to constitute the names of the os disks used by the slave/agent nodes"
default = "vmSlaveOSDisk-"
}
variable "vm_slave_storage_account_container_name" {
description = "The name of the storage account container used by the slave/agent nodes"
default = "vhds"
}
variable "vm_cassandra_name" {
description = "The name of the virtual machine used by Cassandra"
default = "cassandra"
}
variable "vm_cassandra_os_disk_name" {
description = "The name of the os disk used by the Cassandra virtual machine"
default = "vmCassandraOSDisk"
}
variable "vm_cassandra_storage_account_container_name" {
description = "The name of the storage account container used by the Cassandra node"
default = "vhds"
}
variable "availability_slave_name" {
description = "The name of the availability set for the slave/agent machines"
default = "availability-slave"
}
variable "script_spark_provisioner_script_file_name" {
description = "The name of the script kept in version control which will provision Spark"
default = "scriptSparkProvisioner.sh"
}
variable "script_cassandra_provisioner_script_file_name" {
description = "The name of the script kept in version control which will provision Cassandra"
default = "scriptCassandraProvisioner.sh"
}

View File

@ -1,22 +0,0 @@
# Provision a SQL Database
This sample creates a SQL Database at the "Basic" service level. The template can support other service tiers; details for each tier can be found here:
[SQL Database Pricing](https://azure.microsoft.com/en-us/pricing/details/sql-database/)
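To target a different tier, the relevant arguments on the `azurerm_sql_database` resource in `main.tf` are `edition` and `requested_service_objective_name`. A sketch (the "Standard"/"S0" values are illustrative; check the pricing page for valid combinations):
```
resource "azurerm_sql_database" "db" {
  # ... other arguments unchanged ...
  edition                          = "Standard"
  requested_service_objective_name = "S0"
}
```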
## main.tf
The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
## outputs.tf
This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command.
## provider.tf
Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file.
## terraform.tfvars
If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
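As an illustration (the file name and values here are placeholders, not part of this template), a local secret variables file for this example might look like:
```
# secret.auto.tfvars -- hypothetical file; keep it out of version control
resource_group = "my-sql-example-rg"
sql_admin      = "sqladminuser"
sql_password   = "REPLACE-WITH-A-COMPLEX-PASSWORD"
```
Files with the `.auto.tfvars` extension are loaded automatically; alternatively, pass the file explicitly with `terraform plan -var-file=secret.tfvars`.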
## variables.tf
The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
![graph](/examples/azure-sql-database/graph.png)

View File

@ -1,37 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
docker run --rm -it \
-e ARM_CLIENT_ID \
-e ARM_CLIENT_SECRET \
-e ARM_SUBSCRIPTION_ID \
-e ARM_TENANT_ID \
-v $(pwd):/data \
--workdir=/data \
--entrypoint "/bin/sh" \
hashicorp/terraform:light \
-c "/bin/terraform get; \
/bin/terraform validate; \
/bin/terraform plan -out=out.tfplan -var resource_group=$KEY -var sql_admin=$KEY -var sql_password=a!@abcd9753w0w@h@12; \
/bin/terraform apply out.tfplan; \
/bin/terraform show;"
# check that resources exist via azure cli
docker run --rm -it \
azuresdk/azure-cli-python:0.2.10 \
sh -c "az login --service-principal -u $ARM_CLIENT_ID -p $ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID > /dev/null; \
az sql db show -g $KEY -n MySQLDatabase -s $KEY-sqlsvr; \
az sql server show -g $KEY -n $KEY-sqlsvr;"
# cleanup deployed azure resources via terraform
docker run --rm -it \
-e ARM_CLIENT_ID \
-e ARM_CLIENT_SECRET \
-e ARM_SUBSCRIPTION_ID \
-e ARM_TENANT_ID \
-v $(pwd):/data \
--workdir=/data \
--entrypoint "/bin/sh" \
hashicorp/terraform:light \
-c "/bin/terraform destroy -force -var resource_group=$KEY -var sql_admin=$KEY -var sql_password=a!@abcd9753w0w@h@12;"

View File

@ -1,16 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
if docker -v; then
# generate a unique string for CI deployment
export KEY=$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'a-z' | head -c 12)
export PASSWORD='a!@abcd9753w0w@h@12' # quoted literal matching the password hardcoded in deploy.ci.sh; an unquoted $a would be expanded (and fail under nounset)
# =$KEY$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'A-Z' | head -c 2)$(cat /dev/urandom | env LC_CTYPE=C tr -cd '0-9' | head -c 2)
/bin/sh ./deploy.ci.sh
else
echo "Docker is used to run terraform commands, please install before run: https://docs.docker.com/docker-for-mac/install/"
fi

Binary file not shown.


View File

@ -1,39 +0,0 @@
# provider "azurerm" {
# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID"
# client_id = "REPLACE-WITH-YOUR-CLIENT-ID"
# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET"
# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID"
# }
resource "azurerm_resource_group" "rg" {
name = "${var.resource_group}"
location = "${var.location}"
}
resource "azurerm_sql_database" "db" {
name = "mysqldatabase"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${var.location}"
edition = "Basic"
collation = "SQL_Latin1_General_CP1_CI_AS"
create_mode = "Default"
requested_service_objective_name = "Basic"
server_name = "${azurerm_sql_server.server.name}"
}
resource "azurerm_sql_server" "server" {
name = "${var.resource_group}-sqlsvr"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${var.location}"
version = "12.0"
administrator_login = "${var.sql_admin}"
administrator_login_password = "${var.sql_password}"
}
resource "azurerm_sql_firewall_rule" "fw" {
name = "firewallrules"
resource_group_name = "${azurerm_resource_group.rg.name}"
server_name = "${azurerm_sql_server.server.name}"
start_ip_address = "0.0.0.0"
end_ip_address = "0.0.0.0"
}

View File

@ -1,7 +0,0 @@
output "database_name" {
value = "${azurerm_sql_database.db.name}"
}
output "sql_server_fqdn" {
value = "${azurerm_sql_server.server.fully_qualified_domain_name}"
}

View File

@ -1,16 +0,0 @@
variable "resource_group" {
description = "The name of the resource group in which to create the virtual network."
}
variable "location" {
description = "The location/region where the virtual network is created. Changing this forces a new resource to be created."
default = "southcentralus"
}
variable "sql_admin" {
description = "The administrator username of the SQL Server."
}
variable "sql_password" {
description = "The administrator password of the SQL Server."
}

View File

@ -1,27 +0,0 @@
# Azure traffic manager with load balanced scale sets
This example shows how to create a load-balanced scale set in multiple locations and then geographically load balance these using Traffic Manager. In this example the scale set uses a marketplace Ubuntu image; this could be customised using an extension or a generalized image created using Packer.
This configuration demonstrates how variables can be passed in and out of reusable modules. You will need to run `terraform get` so that modules are pre-processed.
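As a minimal sketch of that pattern (names taken from the files in this example):
```
# inside ./tf_modules: declare an input variable and expose an output
variable "location" {}

output "webserverpublic_ip_id" {
  value = "${azurerm_public_ip.webserverpublic_ip.id}"
}

# in the root configuration: pass a value into the module
module "location01" {
  source   = "./tf_modules"
  location = "${var.location01_location}"
}
```
The root configuration then reads the module's output back out as `${module.location01.webserverpublic_ip_id}` when wiring up the Traffic Manager endpoint.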
## Keys and variables
To use this you will need to populate the `terraform.tfvars.example` file with your Azure credentials and keys, rename it to `terraform.tfvars`, and keep it somewhere private. If you need to generate credentials, follow the instructions in the Azure provider documentation [here](https://www.terraform.io/docs/providers/azurerm)
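A populated `terraform.tfvars` follows the shape of the example file (placeholder values shown):
```
subscription_id = "00000000-0000-0000-0000-000000000000"
client_id       = "00000000-0000-0000-0000-000000000000"
client_secret   = "REPLACE-WITH-YOUR-CLIENT-SECRET"
tenant_id       = "00000000-0000-0000-0000-000000000000"
```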
You may also want to modify some of the settings in `variables.tf`. DNS names must be unique within an Azure location, and globally for Traffic Manager.
## To start the script
### Planning
`terraform get`
`terraform plan -var-file="C:\Users\eltimmo\.terraform\keys.tfvars"`
### Apply phase
`terraform apply -var-file="C:\Users\eltimmo\.terraform\keys.tfvars"`
### Destroy
`terraform destroy -var-file="C:\Users\eltimmo\.terraform\keys.tfvars"`

View File

@ -1,102 +0,0 @@
# Provider accounts must be passed
variable "subscription_id" {}
variable "client_id" {}
variable "client_secret" {}
variable "tenant_id" {}
provider "azurerm" {
subscription_id = "${var.subscription_id}"
client_id = "${var.client_id}"
client_secret = "${var.client_secret}"
tenant_id = "${var.tenant_id}"
}
# Create the resource group and assets for first location
module "location01" {
source = "./tf_modules"
location = "${var.location01_location}"
resource_prefix = "${var.location01_resource_prefix}"
webserver_prefix = "${var.location01_webserver_prefix}"
lb_dns_label = "${var.location01_lb_dns_label}"
instance_count = "${var.instance_count}"
instance_vmprofile = "${var.instance_vmprofile}"
image_admin_username = "${var.image_admin_username}"
image_admin_password = "${var.image_admin_password}"
image_publisher = "${var.image_publisher}"
image_offer = "${var.image_offer}"
image_sku = "${var.image_sku}"
image_version = "${var.image_version}"
}
# Create the resource group and assets for second location
module "location02" {
source = "./tf_modules"
location = "${var.location02_location}"
resource_prefix = "${var.location02_resource_prefix}"
webserver_prefix = "${var.location02_webserver_prefix}"
lb_dns_label = "${var.location02_lb_dns_label}"
instance_count = "${var.instance_count}"
instance_vmprofile = "${var.instance_vmprofile}"
image_admin_username = "${var.image_admin_username}"
image_admin_password = "${var.image_admin_password}"
image_publisher = "${var.image_publisher}"
image_offer = "${var.image_offer}"
image_sku = "${var.image_sku}"
image_version = "${var.image_version}"
}
# Create global resource group
resource "azurerm_resource_group" "global_rg" {
name = "global_rg"
location = "${var.global_location}"
}
# Create the traffic manager
resource "azurerm_traffic_manager_profile" "trafficmanagerhttp" {
name = "trafficmanagerhttp"
resource_group_name = "${azurerm_resource_group.global_rg.name}"
traffic_routing_method = "Weighted"
dns_config {
relative_name = "${var.dns_relative_name}"
ttl = 100
}
monitor_config {
protocol = "http"
port = 80
path = "/"
}
}
# Add endpoint mappings to traffic manager, location01
resource "azurerm_traffic_manager_endpoint" "trafficmanagerhttp_01" {
name = "trafficmanagerhttp_ukw"
resource_group_name = "${azurerm_resource_group.global_rg.name}"
profile_name = "${azurerm_traffic_manager_profile.trafficmanagerhttp.name}"
target_resource_id = "${module.location01.webserverpublic_ip_id}"
type = "azureEndpoints"
weight = 100
}
# Add endpoint mappings to traffic manager, location02
resource "azurerm_traffic_manager_endpoint" "trafficmanagerhttp_02" {
name = "trafficmanagerhttp_wus"
resource_group_name = "${azurerm_resource_group.global_rg.name}"
profile_name = "${azurerm_traffic_manager_profile.trafficmanagerhttp.name}"
target_resource_id = "${module.location02.webserverpublic_ip_id}"
type = "azureEndpoints"
weight = 100
}

View File

@ -1,7 +0,0 @@
# Azure provider keys example. This should not be included in your repository for security reasons
# Use terraform -var-file="FULLPATH"
subscription_id = ""
client_id = ""
client_secret = ""
tenant_id = ""

View File

@ -1,165 +0,0 @@
variable "location" {}
variable "resource_prefix" {}
variable "webserver_prefix" {}
variable "lb_dns_label" {}
variable "instance_count" {}
variable "instance_vmprofile" {}
variable "image_admin_username" {}
variable "image_admin_password" {}
variable "image_publisher" {}
variable "image_offer" {}
variable "image_sku" {}
variable "image_version" {}
# Create webserver resource group
resource "azurerm_resource_group" "webservers_rg" {
name = "${var.resource_prefix}_rg"
location = "${var.location}"
}
# Create virtual network
resource "azurerm_virtual_network" "webservers_vnet" {
name = "webservers_vnet"
address_space = ["10.1.0.0/24"]
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.webservers_rg.name}"
}
# Create subnet
resource "azurerm_subnet" "webservers_subnet" {
name = "webservers_subnet"
resource_group_name = "${azurerm_resource_group.webservers_rg.name}"
virtual_network_name = "${azurerm_virtual_network.webservers_vnet.name}"
address_prefix = "10.1.0.0/24"
}
# Create a public ip for the location LB
resource "azurerm_public_ip" "webserverpublic_ip" {
name = "${var.resource_prefix}_publicip"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.webservers_rg.name}"
public_ip_address_allocation = "static"
domain_name_label = "${var.lb_dns_label}"
}
# Create webservers LB
resource "azurerm_lb" "webservers_lb" {
name = "webservers_lb"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.webservers_rg.name}"
frontend_ip_configuration {
name = "webserverpublic_ip"
public_ip_address_id = "${azurerm_public_ip.webserverpublic_ip.id}"
}
}
# Add the backend for webserver LB
resource "azurerm_lb_backend_address_pool" "webservers_lb_backend" {
name = "webservers_lb_backend"
resource_group_name = "${azurerm_resource_group.webservers_rg.name}"
loadbalancer_id = "${azurerm_lb.webservers_lb.id}"
}
# Create HTTP probe on port 80
resource "azurerm_lb_probe" "httpprobe" {
name = "httpprobe"
resource_group_name = "${azurerm_resource_group.webservers_rg.name}"
loadbalancer_id = "${azurerm_lb.webservers_lb.id}"
protocol = "tcp"
port = 80
}
# Create LB rule for HTTP and add to webserver LB
resource "azurerm_lb_rule" "webservers_lb_http" {
name = "webservers_lb_http"
resource_group_name = "${azurerm_resource_group.webservers_rg.name}"
loadbalancer_id = "${azurerm_lb.webservers_lb.id}"
protocol = "Tcp"
frontend_port = "80"
backend_port = "80"
frontend_ip_configuration_name = "webserverpublic_ip"
probe_id = "${azurerm_lb_probe.httpprobe.id}"
backend_address_pool_id = "${azurerm_lb_backend_address_pool.webservers_lb_backend.id}"
}
# Create storage account
resource "azurerm_storage_account" "webservers_sa" {
name = "${var.resource_prefix}storage"
resource_group_name = "${azurerm_resource_group.webservers_rg.name}"
location = "${var.location}"
account_type = "Standard_LRS"
}
# Create container
resource "azurerm_storage_container" "webservers_ct" {
name = "vhds"
resource_group_name = "${azurerm_resource_group.webservers_rg.name}"
storage_account_name = "${azurerm_storage_account.webservers_sa.name}"
container_access_type = "private"
}
# Configure the scale set using library image
resource "azurerm_virtual_machine_scale_set" "webserver_ss" {
name = "webserver_ss"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.webservers_rg.name}"
upgrade_policy_mode = "Manual"
sku {
name = "${var.instance_vmprofile}"
tier = "Standard"
capacity = "${var.instance_count}"
}
os_profile {
computer_name_prefix = "${var.webserver_prefix}"
admin_username = "${var.image_admin_username}"
admin_password = "${var.image_admin_password}"
}
os_profile_linux_config {
disable_password_authentication = false
}
network_profile {
name = "web_ss_net_profile"
primary = true
ip_configuration {
name = "web_ss_ip_profile"
subnet_id = "${azurerm_subnet.webservers_subnet.id}"
load_balancer_backend_address_pool_ids = ["${azurerm_lb_backend_address_pool.webservers_lb_backend.id}"]
}
}
storage_profile_os_disk {
name = "osDiskProfile"
caching = "ReadWrite"
create_option = "FromImage"
vhd_containers = ["${azurerm_storage_account.webservers_sa.primary_blob_endpoint}${azurerm_storage_container.webservers_ct.name}"]
}
storage_profile_image_reference {
publisher = "${var.image_publisher}"
offer = "${var.image_offer}"
sku = "${var.image_sku}"
version = "${var.image_version}"
}
extension {
name = "CustomScriptForLinux"
publisher = "Microsoft.OSTCExtensions"
type = "CustomScriptForLinux"
type_handler_version = "1.4"
settings = <<SETTINGS
{
"commandToExecute" : "sudo apt-get -y install apache2"
}
SETTINGS
}
}

View File

@ -1,5 +0,0 @@
# Output public IP ID (Load Balancer) for traffic manager
output "webserverpublic_ip_id" {
value = "${azurerm_public_ip.webserverpublic_ip.id}"
}

View File

@ -1,98 +0,0 @@
# Traffic manager settings
variable "global_location" {
default = "UK West"
description = "Where any global resources will be placed"
}
variable "dns_relative_name" {
default = "azuretfexample"
description = "Relative DNS name for traffic manager"
}
# Location 01 Settings
variable "location01_location" {
default = "UK West"
description = "First location to build"
}
variable "location01_resource_prefix" {
default = "ukwestweb"
description = "Prefix for naming resource group"
}
variable "location01_webserver_prefix" {
default = "ukwwebsvr"
description = "Prefix for naming web servers"
}
variable "location01_lb_dns_label" {
default = "ukwestwebexample"
description = "DNS name label for the locations load balancer"
}
# Location 02 Settings
variable "location02_location" {
default = "West US"
description = "Second location to build"
}
variable "location02_resource_prefix" {
default = "uswestweb"
description = "Prefix for naming resource group"
}
variable "location02_webserver_prefix" {
default = "uswwebsvr"
description = "Prefix for naming web servers"
}
variable "location02_lb_dns_label" {
default = "uswestwebexample"
description = "DNS name label for the locations load balancer"
}
# Scale set and VM settings
variable "instance_count" {
default = "2"
description = "Number of server instances to create in scale set"
}
variable "instance_vmprofile" {
default = "Standard_A1"
description = "VM profile of servers in scale set"
}
# OS Profile
variable "image_admin_username" {
default = "webadmin"
description = "Local admin user name"
}
variable "image_admin_password" {
default = "2nmn39x#3775hh3x9"
description = "Password"
}
# Market place image to use
variable "image_publisher" {
default = "Canonical"
description = "Publisher of market place image"
}
variable "image_offer" {
default = "UbuntuServer"
description = "Market place image name"
}
variable "image_sku" {
default = "16.10"
description = "Market place image SKU"
}
variable "image_version" {
default = "latest"
description = "Market place image version"
}

View File

@ -1,29 +0,0 @@
# Azure Traffic Manager with virtual machines
This Terraform template was based on [this](https://github.com/Azure/azure-quickstart-templates/tree/master/201-traffic-manager-vm) Azure Quickstart Template. Changes to the ARM template that may have occurred since the creation of this example may not be reflected here.
This template shows how to create an Azure Traffic Manager profile to load-balance across a couple of Azure virtual machines. Each endpoint has an equal weight but different weights can be specified to distribute load non-uniformly.
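For example, a non-uniform split could be expressed by giving one endpoint a larger `weight` (a sketch based on the endpoint resource in `main.tf`; the resource name and weight value are illustrative):
```
resource "azurerm_traffic_manager_endpoint" "heavy" {
  name                = "endpoint-heavy"
  resource_group_name = "${azurerm_resource_group.rg.name}"
  profile_name        = "${azurerm_traffic_manager_profile.profile.name}"
  target_resource_id  = "${azurerm_public_ip.pip.0.id}"
  type                = "azureEndpoints"
  weight              = 2 # receives roughly twice the traffic of a weight-1 endpoint
}
```
With the `Weighted` routing method used by this profile, traffic is distributed in proportion to these values.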
See also:
- <a href="https://azure.microsoft.com/en-us/documentation/articles/traffic-manager-routing-methods/">Traffic Manager routing methods</a> for details of the different routing methods available.
- <a href="https://msdn.microsoft.com/en-us/library/azure/mt163581.aspx">Create or update a Traffic Manager profile</a> for details of the JSON elements relating to a Traffic Manager profile.
## main.tf
The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
## outputs.tf
This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command.
## provider.tf
Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file.
## terraform.tfvars
If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
If you are committing this template to source control, please ensure that you add this file to your `.gitignore` file.
## variables.tf
The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
![`terraform graph`](/examples/azure-traffic-manager-vm/graph.png)

View File

@ -1,36 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
docker run --rm -it \
-e ARM_CLIENT_ID \
-e ARM_CLIENT_SECRET \
-e ARM_SUBSCRIPTION_ID \
-e ARM_TENANT_ID \
-v $(pwd):/data \
--workdir=/data \
--entrypoint "/bin/sh" \
hashicorp/terraform:light \
-c "/bin/terraform get; \
/bin/terraform validate; \
/bin/terraform plan -out=out.tfplan -var dns_name=$KEY -var resource_group=$KEY -var admin_password=$PASSWORD; \
/bin/terraform apply out.tfplan; \
/bin/terraform show;"
# cleanup deployed azure resources via azure-cli
docker run --rm -it \
azuresdk/azure-cli-python:0.2.10 \
sh -c "az login --service-principal -u $ARM_CLIENT_ID -p $ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID > /dev/null; \
az vm show -g $KEY -n rgvm"
# cleanup deployed azure resources via terraform
docker run --rm -it \
-e ARM_CLIENT_ID \
-e ARM_CLIENT_SECRET \
-e ARM_SUBSCRIPTION_ID \
-e ARM_TENANT_ID \
-v $(pwd):/data \
--workdir=/data \
--entrypoint "/bin/sh" \
hashicorp/terraform:light \
-c "/bin/terraform destroy -force -var dns_name=$KEY -var resource_group=$KEY -var admin_password=$PASSWORD;"

View File

@ -1,15 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
if docker -v; then
# generate a unique string for CI deployment
export KEY=$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'a-z' | head -c 12)
export PASSWORD=$KEY$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'A-Z' | head -c 2)$(cat /dev/urandom | env LC_CTYPE=C tr -cd '0-9' | head -c 2)
/bin/sh ./deploy.ci.sh
else
echo "Docker is used to run terraform commands, please install before run: https://docs.docker.com/docker-for-mac/install/"
fi

Binary file not shown.


View File

@ -1,125 +0,0 @@
# provider "azurerm" {
# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID"
# client_id = "REPLACE-WITH-YOUR-CLIENT-ID"
# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET"
# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID"
# }
resource "azurerm_resource_group" "rg" {
name = "${var.resource_group}"
location = "${var.location}"
}
resource "azurerm_public_ip" "pip" {
name = "ip${count.index}"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
public_ip_address_allocation = "dynamic"
domain_name_label = "${var.dns_name}${count.index}"
count = "${var.num_vms}"
}
resource "azurerm_virtual_network" "vnet" {
name = "${var.vnet}"
location = "${var.location}"
address_space = ["${var.address_space}"]
resource_group_name = "${azurerm_resource_group.rg.name}"
}
resource "azurerm_subnet" "subnet" {
name = "${var.subnet_name}"
virtual_network_name = "${azurerm_virtual_network.vnet.name}"
resource_group_name = "${azurerm_resource_group.rg.name}"
address_prefix = "${var.subnet_prefix}"
}
resource "azurerm_network_interface" "nic" {
name = "nic${count.index}"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
count = "${var.num_vms}"
ip_configuration {
name = "ipconfig${count.index}"
subnet_id = "${azurerm_subnet.subnet.id}"
private_ip_address_allocation = "Dynamic"
public_ip_address_id = "${element(azurerm_public_ip.pip.*.id, count.index)}"
}
}
resource "azurerm_virtual_machine" "vm" {
name = "vm${count.index}"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
vm_size = "${var.vm_size}"
count = "${var.num_vms}"
network_interface_ids = ["${element(azurerm_network_interface.nic.*.id, count.index)}"]
storage_image_reference {
publisher = "${var.image_publisher}"
offer = "${var.image_offer}"
sku = "${var.image_sku}"
version = "${var.image_version}"
}
storage_os_disk {
name = "osdisk${count.index}"
create_option = "FromImage"
}
os_profile {
computer_name = "vm${count.index}"
admin_username = "${var.admin_username}"
admin_password = "${var.admin_password}"
}
os_profile_linux_config {
disable_password_authentication = false
}
}
resource "azurerm_virtual_machine_extension" "ext" {
depends_on = ["azurerm_virtual_machine.vm"]
name = "CustomScript"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
virtual_machine_name = "vm${count.index}"
publisher = "Microsoft.Azure.Extensions"
type = "CustomScript"
type_handler_version = "2.0"
count = "${var.num_vms}"
auto_upgrade_minor_version = true
settings = <<SETTINGS
{
"commandToExecute": "sudo bash -c 'apt-get update && apt-get -y install apache2' "
}
SETTINGS
}
resource "azurerm_traffic_manager_profile" "profile" {
name = "trafficmanagerprofile"
resource_group_name = "${azurerm_resource_group.rg.name}"
traffic_routing_method = "Weighted"
dns_config {
relative_name = "${azurerm_resource_group.rg.name}"
ttl = 30
}
monitor_config {
protocol = "http"
port = 80
path = "/"
}
}
resource "azurerm_traffic_manager_endpoint" "endpoint" {
name = "endpoint${count.index}"
resource_group_name = "${azurerm_resource_group.rg.name}"
profile_name = "${azurerm_traffic_manager_profile.profile.name}"
target_resource_id = "${element(azurerm_public_ip.pip.*.id, count.index)}"
type = "azureEndpoints"
weight = 1
count = "${var.num_vms}" # keep the endpoint count in step with the number of VMs/PIPs created above
}

View File

@ -1,3 +0,0 @@
output "dns_name" {
value = "${var.dns_name}"
}

View File

@ -1,71 +0,0 @@
variable "resource_group" {
description = "The name of the resource group in which to create the virtual network, virtual machines, and traffic manager."
}
variable "location" {
description = "The location/region where the virtual network is created. Changing this forces a new resource to be created."
default = "southcentralus"
}
variable "dns_name" {
description = "Relative DNS name for the traffic manager profile, resulting FQDN will be <uniqueDnsName>.trafficmanager.net, must be globally unique."
}
variable "vnet" {
description = "The name of virtual network"
default = "vnet"
}
variable "num_vms" {
description = "The number of virtual machines you will provision. This variable is also used for NICs and PIPs in this Terraform script."
default = "3"
}
variable "address_space" {
description = "The address space that is used by the virtual network. You can supply more than one address space. Changing this forces a new resource to be created."
default = "10.0.0.0/16"
}
variable "subnet_name" {
description = "The name of the subnet"
default = "subnet"
}
variable "subnet_prefix" {
description = "The address prefix to use for the subnet"
default = "10.0.0.0/24"
}
variable "vm_size" {
description = "The size of the virtual machine"
default = "Standard_D1"
}
variable "image_publisher" {
description = "The name of the publisher of the image (az vm image list)"
default = "Canonical"
}
variable "image_offer" {
description = "The name of the offer (az vm image list)"
default = "UbuntuServer"
}
variable "image_sku" {
description = "The Ubuntu version for the VM. This will pick a fully patched image of this given Ubuntu version. Allowed values: 12.04.5-LTS, 14.04.2-LTS, 15.10."
default = "14.04.2-LTS"
}
variable "image_version" {
description = "the version of the image to apply (az vm image list)"
default = "latest"
}
variable "admin_username" {
description = "Username for virtual machines"
default = "vmadmin"
}
variable "admin_password" {
description = "Password for virtual machines"
}

View File

@ -1,23 +0,0 @@
# Create a new VM on a new storage account from a custom image
This Terraform template was based on [this](https://github.com/Azure/azure-quickstart-templates/tree/master/201-vm-custom-image-new-storage-account) Azure Quickstart Template. Changes to the ARM template that may have occurred since the creation of this example may not be reflected here.
This template allows you to create a new Virtual Machine from a custom image, deployed together with a new storage account, which means the source image VHD must be transferred into the newly created storage account before that Virtual Machine is deployed. This is accomplished by using a transfer virtual machine that is deployed first and then runs a script, via the custom script extension, to copy the source VHD to the destination storage account. This process works around the limitation that a custom VHD must reside in the same storage account where new virtual machines based on it will be spun up; since the storage account is itself being deployed within the template and does not exist yet, the source VHD cannot be uploaded beforehand.
Basically, it creates two VMs: the transfer virtual machine and the actual virtual machine that is the goal of the deployment. The transfer VM can be removed later.
The process of this template is (sketched in Terraform terms after the list):
1. A Virtual Network is deployed
2. Virtual NICs are created for both Virtual Machines
3. A Storage Account is created
4. The transfer Virtual Machine is deployed
5. The transfer Virtual Machine runs the custom script extension to copy the VHD from the source to the destination storage account
6. The new Virtual Machine based on the custom image VHD is deployed
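In Terraform, that ordering is enforced with explicit `depends_on` chains, as in the `main.tf` shown later in this diff (abridged here):
```
resource "azurerm_virtual_machine_extension" "execute" {
  # runs the VHD copy on the transfer VM, after the copy script is downloaded
  depends_on = ["azurerm_virtual_machine_extension.script"]
  # ...
}

resource "azurerm_virtual_machine" "myvm" {
  # the final VM is only created once the copy has finished
  depends_on = ["azurerm_virtual_machine_extension.execute"]
  # ...
}
```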
## Requirements
* A preexisting generalized (sysprepped) Windows image. For more information on how to create custom Windows images, please refer to the [How to capture a Windows virtual machine in the Resource Manager deployment model](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-windows-capture-image/) article.
* The full URL of the source image blob, e.g. https://pmcstorage01.blob.core.windows.net/images/images/Win10MasterImage-osDisk.72451a98-4c26-4375-90c5-0a940dd56bab.vhd. Note that the container name always comes directly after https://pmcstorage01.blob.core.windows.net; in this example it is `images`. The actual blob name is **images/Win10MasterImage-osDisk.72451a98-4c26-4375-90c5-0a940dd56bab.vhd**.
![graph](/examples/azure-vm-custom-image-new-storage-account/graph.png)

View File

@ -1,65 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
docker run --rm -it \
-e ARM_CLIENT_ID \
-e ARM_CLIENT_SECRET \
-e ARM_SUBSCRIPTION_ID \
-e ARM_TENANT_ID \
-v $(pwd):/data \
--workdir=/data \
--entrypoint "/bin/sh" \
hashicorp/terraform:light \
-c "/bin/terraform get; \
/bin/terraform validate; \
/bin/terraform plan -out=out.tfplan \
-var source_img_uri=$EXISTING_WINDOWS_IMAGE_URI \
-var hostname=$KEY \
-var resource_group=$KEY \
-var existing_resource_group=$EXISTING_RESOURCE_GROUP \
-var admin_password=$PASSWORD \
-var existing_storage_acct=$EXISTING_STORAGE_ACCOUNT_NAME \
-var custom_image_name=$WINDOWS_DISK_NAME; \
/bin/terraform apply out.tfplan; \
/bin/terraform show;"
# cleanup deployed azure resources via azure-cli
docker run --rm -it \
azuresdk/azure-cli-python:0.2.10 \
sh -c "az login --service-principal -u $ARM_CLIENT_ID -p $ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID > /dev/null; \
az vm show -g $KEY -n myvm; \
az storage account show -g $KEY -n $KEY;"
# cleanup deployed azure resources via terraform
docker run --rm -it \
-e ARM_CLIENT_ID \
-e ARM_CLIENT_SECRET \
-e ARM_SUBSCRIPTION_ID \
-e ARM_TENANT_ID \
-v $(pwd):/data \
--workdir=/data \
--entrypoint "/bin/sh" \
hashicorp/terraform:light \
-c "/bin/terraform destroy -force \
-var source_img_uri=$EXISTING_WINDOWS_IMAGE_URI \
-var hostname=$KEY \
-var resource_group=$KEY \
-var existing_resource_group=$EXISTING_RESOURCE_GROUP \
-var admin_password=$PASSWORD \
-var existing_storage_acct=$EXISTING_STORAGE_ACCOUNT_NAME \
-var custom_image_name=$WINDOWS_DISK_NAME \
-target=azurerm_virtual_machine.myvm \
-target=azurerm_virtual_machine.transfer \
-target=azurerm_network_interface.transfernic \
-target=azurerm_network_interface.mynic \
-target=azurerm_virtual_network.vnet \
-target=azurerm_public_ip.mypip \
-target=azurerm_public_ip.transferpip \
-target=azurerm_storage_account.stor;"
# If you target the resource group to destroy with Terraform, it will destroy the existing storage account, so it must be deleted manually with the CLI.
docker run --rm -it \
azuresdk/azure-cli-python:0.2.10 \
sh -c "az login --service-principal -u $ARM_CLIENT_ID -p $ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID > /dev/null; \
az group delete -n $KEY -y"

View File

@ -1,20 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
if docker -v; then
# generate a unique string for CI deployment
export KEY=$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'a-z' | head -c 12)
export PASSWORD=$KEY$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'A-Z' | head -c 2)$(cat /dev/urandom | env LC_CTYPE=C tr -cd '0-9' | head -c 2)
export EXISTING_RESOURCE_GROUP=donotdelete
export EXISTING_WINDOWS_IMAGE_URI=https://tfpermstor.blob.core.windows.net/vhds/osdisk_alBZrO4OlX.vhd
export EXISTING_STORAGE_ACCOUNT_NAME=donotdeletedisks636
export CUSTOM_WINDOWS_IMAGE_NAME=WindowsImage
export WINDOWS_DISK_NAME=osdisk_alBZrO4OlX
/bin/sh ./deploy.ci.sh
else
echo "Docker is used to run terraform commands, please install before run: https://docs.docker.com/docker-for-mac/install/"
fi

Binary file not shown.


View File

@ -1,165 +0,0 @@
# provider "azurerm" {
# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID"
# client_id = "REPLACE-WITH-YOUR-CLIENT-ID"
# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET"
# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID"
# }
resource "azurerm_resource_group" "rg" {
name = "${var.resource_group}"
location = "${var.location}"
}
resource "azurerm_virtual_network" "vnet" {
name = "${var.hostname}vnet"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
address_space = ["${var.address_space}"]
}
resource "azurerm_subnet" "subnet" {
name = "${var.hostname}subnet"
virtual_network_name = "${azurerm_virtual_network.vnet.name}"
resource_group_name = "${azurerm_resource_group.rg.name}"
address_prefix = "${var.subnet_prefix}"
}
resource "azurerm_public_ip" "transferpip" {
name = "transferpip"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
public_ip_address_allocation = "Static"
}
resource "azurerm_network_interface" "transfernic" {
name = "transfernic"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
ip_configuration {
name = "${azurerm_public_ip.transferpip.name}"
subnet_id = "${azurerm_subnet.subnet.id}"
private_ip_address_allocation = "Static"
public_ip_address_id = "${azurerm_public_ip.transferpip.id}"
private_ip_address = "10.0.0.5"
}
}
resource "azurerm_public_ip" "mypip" {
name = "mypip"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
public_ip_address_allocation = "Dynamic"
}
resource "azurerm_network_interface" "mynic" {
name = "mynic"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
ip_configuration {
name = "${azurerm_public_ip.mypip.name}"
subnet_id = "${azurerm_subnet.subnet.id}"
private_ip_address_allocation = "Dynamic"
public_ip_address_id = "${azurerm_public_ip.mypip.id}"
}
}
resource "azurerm_storage_account" "existing" {
name = "${var.existing_storage_acct}"
resource_group_name = "${var.existing_resource_group}"
location = "${azurerm_resource_group.rg.location}"
account_type = "${var.existing_storage_acct_type}"
lifecycle {
prevent_destroy = true
}
}
resource "azurerm_storage_account" "stor" {
name = "${var.hostname}"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${azurerm_resource_group.rg.location}"
account_type = "${var.storage_account_type}"
}
resource "azurerm_virtual_machine" "transfer" {
name = "${var.transfer_vm_name}"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
vm_size = "${var.vm_size}"
network_interface_ids = ["${azurerm_network_interface.transfernic.id}"]
storage_os_disk {
name = "${var.hostname}-osdisk"
image_uri = "${var.source_img_uri}"
vhd_uri = "https://${var.existing_storage_acct}.blob.core.windows.net/${var.existing_resource_group}-vhds/${var.hostname}osdisk.vhd"
os_type = "${var.os_type}"
caching = "ReadWrite"
create_option = "FromImage"
}
os_profile {
computer_name = "${var.hostname}"
admin_username = "${var.admin_username}"
admin_password = "${var.admin_password}"
}
}
resource "azurerm_virtual_machine_extension" "script" {
name = "CustomScriptExtension"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
virtual_machine_name = "${azurerm_virtual_machine.transfer.name}"
publisher = "Microsoft.Compute"
type = "CustomScriptExtension"
type_handler_version = "1.4"
depends_on = ["azurerm_virtual_machine.transfer"]
settings = <<SETTINGS
{
"commandToExecute": "powershell -ExecutionPolicy Unrestricted -Command \"Invoke-WebRequest -Uri https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/201-vm-custom-image-new-storage-account/ImageTransfer.ps1 -OutFile C:/ImageTransfer.ps1\" "
}
SETTINGS
}
resource "azurerm_virtual_machine_extension" "execute" {
name = "CustomScriptExtension"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
virtual_machine_name = "${azurerm_virtual_machine.transfer.name}"
publisher = "Microsoft.Compute"
type = "CustomScriptExtension"
type_handler_version = "1.4"
depends_on = ["azurerm_virtual_machine_extension.script"]
settings = <<SETTINGS
{
"commandToExecute": "powershell -ExecutionPolicy Unrestricted -File C:\\ImageTransfer.ps1 -SourceImage ${var.source_img_uri} -SourceSAKey ${azurerm_storage_account.existing.primary_access_key} -DestinationURI https://${azurerm_storage_account.stor.name}.blob.core.windows.net/vhds -DestinationSAKey ${azurerm_storage_account.stor.primary_access_key}\" "
}
SETTINGS
}
resource "azurerm_virtual_machine" "myvm" {
name = "${var.new_vm_name}"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
vm_size = "${var.vm_size}"
network_interface_ids = ["${azurerm_network_interface.mynic.id}"]
depends_on = ["azurerm_virtual_machine_extension.execute"]
storage_os_disk {
name = "${var.hostname}osdisk"
image_uri = "https://${azurerm_storage_account.stor.name}.blob.core.windows.net/vhds/${var.custom_image_name}.vhd"
vhd_uri = "https://${var.hostname}.blob.core.windows.net/${var.hostname}-vhds/${var.hostname}osdisk.vhd"
os_type = "${var.os_type}"
caching = "ReadWrite"
create_option = "FromImage"
}
os_profile {
computer_name = "${var.hostname}"
admin_username = "${var.admin_username}"
admin_password = "${var.admin_password}"
}
}

View File

@ -1,15 +0,0 @@
output "hostname" {
value = "${var.hostname}"
}
output "ip_address" {
value = "${azurerm_public_ip.transferpip.ip_address}"
}
output "fqdn" {
value = "${azurerm_public_ip.transferpip.ip_address}"
}
output "id" {
value = "${azurerm_public_ip.transferpip.id}"
}

View File

@ -1,97 +0,0 @@
variable "resource_group" {
description = "Name of the resource group in which to deploy your new Virtual Machines"
}
variable "location" {
description = "The location/region where the virtual network resides."
default = "southcentralus"
}
variable "hostname" {
description = "This variable is used in this template to create various other names, such as vnet name, subnet name, storage account name, et. al."
}
variable "os_type" {
description = "Type of OS on the existing vhd. Allowed values: 'windows' or 'linux'."
default = "windows"
}
variable "existing_storage_acct" {
description = "The name of the storage account in which your existing VHD and image reside"
}
variable "existing_storage_acct_type" {
description = "The type of the storage account in which your existing VHD and image reside"
default = "Standard_LRS"
}
variable "existing_resource_group" {
description = "The name of the resource group in which your existing storage account with your existing VHD resides"
}
variable "address_space" {
description = "The address space that is used by the virtual network. You can supply more than one address space. Changing this forces a new resource to be created."
default = "10.0.0.0/16"
}
variable "subnet_prefix" {
description = "The address prefix to use for the subnet."
default = "10.0.0.0/24"
}
variable "storage_account_type" {
description = "Defines the type of storage account to be created. Valid options are Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS. Changing this is sometimes valid - see the Azure documentation for more information on which types of accounts can be converted into other types."
default = "Standard_LRS"
}
variable "vm_size" {
description = "VM size of new virtual machine that will be deployed from a custom image."
default = "Standard_DS1_v2"
}
variable "image_publisher" {
description = "name of the publisher of the image (az vm image list)"
default = "MicrosoftWindowsServer"
}
variable "image_offer" {
description = "the name of the offer (az vm image list)"
default = "WindowsServer"
}
variable "image_sku" {
description = "image sku to apply (az vm image list)"
default = "2012-R2-Datacenter"
}
variable "image_version" {
description = "version of the image to apply (az vm image list)"
default = "latest"
}
variable "admin_username" {
description = "Name of the local administrator account, this cannot be 'Admin', 'Administrator', or 'root'."
default = "vmadmin"
}
variable "admin_password" {
description = "Local administrator password, complex password is required, do not use any variation of the word 'password' because it will be rejected. Minimum 8 characters."
}
variable "transfer_vm_name" {
description = "Name of the Windows VM that will perform the copy of the VHD from a source storage account to the new storage account created in the new deployment, this is known as transfer vm. Must be 3-15 characters."
default = "transfervm"
}
variable "new_vm_name" {
description = "Name of the new VM deployed from the custom image. Must be 3-15 characters."
default = "myvm"
}
variable "custom_image_name" {
description = "Name of the VHD to be used as source syspreped/generalized image to deploy the VM, for example 'mybaseimage.vhd'"
}
variable "source_img_uri" {
description = "Full URIs for one or more custom images (VHDs) that should be copied to the deployment storage account to spin up new VMs from them. URLs must be comma separated."
}

View File

@ -1,26 +0,0 @@
# [Create a Virtual Machine from a User Image](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cli-deploy-templates#create-a-custom-vm-image)
This Terraform template was based on [this](https://github.com/Azure/azure-quickstart-templates/tree/master/101-vm-from-user-image) Azure Quickstart Template. Changes to the ARM template that may have occurred since the creation of this example may not be reflected here.
> Prerequisite - The generalized image VHD should exist, as well as a Storage Account for boot diagnostics
This template allows you to create a Virtual Machine from an unmanaged user image VHD. This template also deploys a Virtual Network, Public IP addresses, and a Network Interface.
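The core of the template is the `storage_os_disk` block, which points `image_uri` at the existing generalized VHD and writes the new OS disk into the given storage account (as it appears in `main.tf` below):
```
storage_os_disk {
  name          = "${var.hostname}-osdisk1"
  image_uri     = "${var.image_uri}" # existing generalized (sysprepped) VHD
  vhd_uri       = "https://${var.storage_account_name}.blob.core.windows.net/vhds/${var.hostname}-osdisk.vhd"
  os_type       = "${var.os_type}"
  caching       = "ReadWrite"
  create_option = "FromImage"
}
```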
## main.tf
The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
## outputs.tf
This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command.
## provider.tf
Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file.
## terraform.tfvars
If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
If you are committing this template to source control, please ensure that you add this file to your `.gitignore` file.
## variables.tf
The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
![graph](/examples/azure-vm-from-user-image/graph.png)

View File

@ -1,51 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
docker run --rm -it \
-e ARM_CLIENT_ID \
-e ARM_CLIENT_SECRET \
-e ARM_SUBSCRIPTION_ID \
-e ARM_TENANT_ID \
-v $(pwd):/data \
--workdir=/data \
--entrypoint "/bin/sh" \
hashicorp/terraform:light \
-c "/bin/terraform get; \
/bin/terraform validate; \
/bin/terraform plan -out=out.tfplan -var hostname=$KEY -var resource_group=$EXISTING_RESOURCE_GROUP -var admin_username=$KEY -var admin_password=$PASSWORD -var image_uri=$EXISTING_LINUX_IMAGE_URI -var storage_account_name=$EXISTING_STORAGE_ACCOUNT_NAME; \
/bin/terraform apply out.tfplan; \
/bin/terraform show;"
docker run --rm -it \
azuresdk/azure-cli-python:0.2.10 \
sh -c "az login --service-principal -u $ARM_CLIENT_ID -p $ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID > /dev/null; \
az vm show --name $KEY --resource-group permanent"
# cleanup deployed azure resources via terraform
docker run --rm -it \
-e ARM_CLIENT_ID \
-e ARM_CLIENT_SECRET \
-e ARM_SUBSCRIPTION_ID \
-e ARM_TENANT_ID \
-v $(pwd):/data \
--workdir=/data \
--entrypoint "/bin/sh" \
hashicorp/terraform:light \
-c "/bin/terraform destroy -force \
-var hostname=$KEY \
-var resource_group=$EXISTING_RESOURCE_GROUP \
-var admin_username=$KEY \
-var admin_password=$PASSWORD \
-var image_uri=$EXISTING_LINUX_IMAGE_URI \
-var storage_account_name=$EXISTING_STORAGE_ACCOUNT_NAME \
-target=azurerm_virtual_machine.vm \
-target=azurerm_network_interface.nic \
-target=azurerm_virtual_network.vnet \
-target=azurerm_public_ip.pip;"
# The os disks must be deleted manually from the permanent resource group as this group is not under Terraform's state.
docker run --rm -it \
azuresdk/azure-cli-python:0.2.10 \
sh -c "az login --service-principal -u $ARM_CLIENT_ID -p $ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID > /dev/null; \
az disk delete --name $KEY-osdisk --resource-group permanent -y"

View File

@ -1,18 +0,0 @@
#!/bin/bash
set -o errexit -o nounset
if docker -v; then
# generate a unique string for CI deployment
export KEY=$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'a-z' | head -c 12)
export PASSWORD=$KEY$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'A-Z' | head -c 2)$(cat /dev/urandom | env LC_CTYPE=C tr -cd '0-9' | head -c 2)
export EXISTING_LINUX_IMAGE_URI=https://tfpermstor.blob.core.windows.net/vhds/osdisk_fmF5O5MxlR.vhd
export EXISTING_STORAGE_ACCOUNT_NAME=tfpermstor
export EXISTING_RESOURCE_GROUP=permanent
/bin/sh ./deploy.ci.sh
else
echo "Docker is used to run terraform commands, please install before run: https://docs.docker.com/docker-for-mac/install/"
fi

Binary file not shown.


View File

@ -1,73 +0,0 @@
# provider "azurerm" {
# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID"
# client_id = "REPLACE-WITH-YOUR-CLIENT-ID"
# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET"
# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID"
# }
resource "azurerm_resource_group" "rg" {
name = "${var.resource_group}"
location = "${var.location}"
}
resource "azurerm_virtual_network" "vnet" {
name = "${var.hostname}vnet"
location = "${var.location}"
address_space = ["${var.address_space}"]
resource_group_name = "${azurerm_resource_group.rg.name}"
}
resource "azurerm_subnet" "subnet" {
name = "${var.hostname}subnet"
virtual_network_name = "${azurerm_virtual_network.vnet.name}"
resource_group_name = "${azurerm_resource_group.rg.name}"
address_prefix = "${var.subnet_prefix}"
}
resource "azurerm_network_interface" "nic" {
name = "${var.hostname}nic"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
ip_configuration {
name = "${var.hostname}ipconfig"
subnet_id = "${azurerm_subnet.subnet.id}"
private_ip_address_allocation = "Dynamic"
public_ip_address_id = "${azurerm_public_ip.pip.id}"
}
}
resource "azurerm_public_ip" "pip" {
name = "${var.hostname}-ip"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
public_ip_address_allocation = "Dynamic"
domain_name_label = "${var.hostname}"
}
resource "azurerm_virtual_machine" "vm" {
name = "${var.hostname}"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
vm_size = "${var.vm_size}"
network_interface_ids = ["${azurerm_network_interface.nic.id}"]
storage_os_disk {
name = "${var.hostname}-osdisk1"
image_uri = "${var.image_uri}"
vhd_uri = "https://${var.storage_account_name}.blob.core.windows.net/vhds/${var.hostname}-osdisk.vhd"
os_type = "${var.os_type}"
caching = "ReadWrite"
create_option = "FromImage"
}
os_profile {
computer_name = "${var.hostname}"
admin_username = "${var.admin_username}"
admin_password = "${var.admin_password}"
}
os_profile_linux_config {
disable_password_authentication = false
}
}

View File

@ -1,11 +0,0 @@
output "hostname" {
value = "${var.hostname}"
}
output "vm_fqdn" {
value = "${azurerm_public_ip.pip.fqdn}"
}
output "ssh_command" {
value = "${concat("ssh ", var.admin_username, "@", azurerm_public_ip.pip.fqdn)}"
}

View File

@ -1,55 +0,0 @@
variable "resource_group" {
description = "The name of the resource group in which the image to clone resides."
default = "myrg"
}
variable "image_uri" {
description = "Specifies the image_uri in the form publisherName:offer:skus:version. image_uri can also specify the VHD uri of a custom VM image to clone."
}
variable "os_type" {
description = "Specifies the operating system Type, valid values are windows, linux."
default = "linux"
}
variable "location" {
description = "The location/region where the virtual network is created. Changing this forces a new resource to be created."
default = "southcentralus"
}
variable "address_space" {
description = "The address space that is used by the virtual network. You can supply more than one address space. Changing this forces a new resource to be created."
default = "10.0.0.0/24"
}
variable "subnet_prefix" {
description = "The address prefix to use for the subnet."
default = "10.0.0.0/24"
}
variable "storage_account_name" {
description = "The name of the storage account in which the image from which you are cloning resides."
}
variable "storage_account_type" {
description = "Defines the type of storage account to be created. Valid options are Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS. Changing this is sometimes valid - see the Azure documentation for more information on which types of accounts can be converted into other types."
default = "Premium_LRS"
}
variable "vm_size" {
description = "Specifies the size of the virtual machine. This must be the same as the vm image from which you are copying."
default = "Standard_DS1_v2"
}
variable "hostname" {
description = "VM name referenced also in storage-related names. This is also used as the label for the Domain Name and to make up the FQDN. If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS system."
}
variable "admin_username" {
description = "administrator user name"
default = "vmadmin"
}
variable "admin_password" {
description = "The Password for the account specified in the 'admin_username' field. We recommend disabling Password Authentication in a Production environment."
}

View File

@ -1,22 +0,0 @@
# Very simple deployment of a Linux VM
This template allows you to deploy a simple Linux VM with a minimal set of parameters, choosing from a few different Ubuntu versions and always using the latest patched image for the chosen version. It deploys an A0-size VM in the resource group location and returns the FQDN of the VM.
## main.tf
The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
## outputs.tf
This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command.
## provider.tf
Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file.
## terraform.tfvars
If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
## variables.tf
The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
![graph](/examples/azure-vm-simple-linux-managed-disk/graph.png)


@ -1,36 +0,0 @@
#!/bin/bash
set -o errexit -o nounset

# deploy the template via terraform in a docker container
docker run --rm -it \
  -e ARM_CLIENT_ID \
  -e ARM_CLIENT_SECRET \
  -e ARM_SUBSCRIPTION_ID \
  -e ARM_TENANT_ID \
  -v "$(pwd)":/data \
  --workdir=/data \
  --entrypoint "/bin/sh" \
  hashicorp/terraform:light \
  -c "/bin/terraform get; \
/bin/terraform validate; \
/bin/terraform plan -out=out.tfplan -var dns_name=$KEY -var hostname=$KEY -var resource_group=$KEY -var admin_password=$PASSWORD; \
/bin/terraform apply out.tfplan; \
/bin/terraform show;"

# verify the deployed azure resources via azure-cli
docker run --rm -it \
  azuresdk/azure-cli-python:0.2.10 \
  sh -c "az login --service-principal -u $ARM_CLIENT_ID -p $ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID > /dev/null; \
az vm show -g $KEY -n rgvm"

# cleanup deployed azure resources via terraform
docker run --rm -it \
  -e ARM_CLIENT_ID \
  -e ARM_CLIENT_SECRET \
  -e ARM_SUBSCRIPTION_ID \
  -e ARM_TENANT_ID \
  -v "$(pwd)":/data \
  --workdir=/data \
  --entrypoint "/bin/sh" \
  hashicorp/terraform:light \
  -c "/bin/terraform destroy -force -var dns_name=$KEY -var hostname=$KEY -var resource_group=$KEY -var admin_password=$PASSWORD;"


@ -1,15 +0,0 @@
#!/bin/bash
set -o errexit -o nounset

if docker -v; then
  # generate a unique string for CI deployment
  export KEY=$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'a-z' | head -c 12)
  export PASSWORD=$KEY$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'A-Z' | head -c 2)$(cat /dev/urandom | env LC_CTYPE=C tr -cd '0-9' | head -c 2)

  /bin/sh ./deploy.ci.sh
else
  echo "Docker is used to run terraform commands; please install it before running: https://docs.docker.com/docker-for-mac/install/"
fi

Binary file not shown (image, 229 KiB).


@ -1,108 +0,0 @@
# provider "azurerm" {
# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID"
# client_id = "REPLACE-WITH-YOUR-CLIENT-ID"
# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET"
# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID"
# }
resource "azurerm_resource_group" "rg" {
name = "${var.resource_group}"
location = "${var.location}"
}
resource "azurerm_virtual_network" "vnet" {
name = "${var.virtual_network_name}"
location = "${var.location}"
address_space = ["${var.address_space}"]
resource_group_name = "${azurerm_resource_group.rg.name}"
}
resource "azurerm_subnet" "subnet" {
name = "${var.rg_prefix}subnet"
virtual_network_name = "${azurerm_virtual_network.vnet.name}"
resource_group_name = "${azurerm_resource_group.rg.name}"
address_prefix = "${var.subnet_prefix}"
}
resource "azurerm_network_interface" "nic" {
name = "${var.rg_prefix}nic"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
ip_configuration {
name = "${var.rg_prefix}ipconfig"
subnet_id = "${azurerm_subnet.subnet.id}"
private_ip_address_allocation = "Dynamic"
public_ip_address_id = "${azurerm_public_ip.pip.id}"
}
}
resource "azurerm_public_ip" "pip" {
name = "${var.rg_prefix}-ip"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
public_ip_address_allocation = "Dynamic"
domain_name_label = "${var.dns_name}"
}
resource "azurerm_storage_account" "stor" {
name = "${var.dns_name}stor"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
account_type = "${var.storage_account_type}"
}
resource "azurerm_managed_disk" "datadisk" {
name = "${var.hostname}-datadisk"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
storage_account_type = "Standard_LRS"
create_option = "Empty"
disk_size_gb = "1023"
}
resource "azurerm_virtual_machine" "vm" {
name = "${var.rg_prefix}vm"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
vm_size = "${var.vm_size}"
network_interface_ids = ["${azurerm_network_interface.nic.id}"]
storage_image_reference {
publisher = "${var.image_publisher}"
offer = "${var.image_offer}"
sku = "${var.image_sku}"
version = "${var.image_version}"
}
storage_os_disk {
name = "${var.hostname}-osdisk"
managed_disk_type = "Standard_LRS"
caching = "ReadWrite"
create_option = "FromImage"
}
storage_data_disk {
name = "${var.hostname}-datadisk"
managed_disk_id = "${azurerm_managed_disk.datadisk.id}"
managed_disk_type = "Standard_LRS"
disk_size_gb = "1023"
create_option = "Attach"
lun = 0
}
os_profile {
computer_name = "${var.hostname}"
admin_username = "${var.admin_username}"
admin_password = "${var.admin_password}"
}
os_profile_linux_config {
disable_password_authentication = false
}
boot_diagnostics {
enabled = true
storage_uri = "${azurerm_storage_account.stor.primary_blob_endpoint}"
}
}


@ -1,11 +0,0 @@
output "hostname" {
value = "${var.hostname}"
}
output "vm_fqdn" {
value = "${azurerm_public_ip.pip.fqdn}"
}
output "ssh_command" {
value = "ssh ${var.admin_username}@${azurerm_public_ip.pip.fqdn}"
}


@ -1,75 +0,0 @@
variable "resource_group" {
description = "The name of the resource group in which to create the virtual network."
}
variable "rg_prefix" {
description = "The shortened abbreviation to represent your resource group that will go on the front of some resources."
default = "rg"
}
variable "hostname" {
description = "VM name referenced also in storage-related names."
}
variable "dns_name" {
description = " Label for the Domain Name. Will be used to make up the FQDN. If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS system."
}
variable "location" {
description = "The location/region where the virtual network is created. Changing this forces a new resource to be created."
default = "southcentralus"
}
variable "virtual_network_name" {
description = "The name for the virtual network."
default = "vnet"
}
variable "address_space" {
description = "The address space that is used by the virtual network. You can supply more than one address space. Changing this forces a new resource to be created."
default = "10.0.0.0/16"
}
variable "subnet_prefix" {
description = "The address prefix to use for the subnet."
default = "10.0.10.0/24"
}
variable "storage_account_type" {
description = "Defines the type of storage account to be created. Valid options are Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS. Changing this is sometimes valid - see the Azure documentation for more information on which types of accounts can be converted into other types."
default = "Standard_LRS"
}
variable "vm_size" {
description = "Specifies the size of the virtual machine."
default = "Standard_A0"
}
variable "image_publisher" {
description = "name of the publisher of the image (az vm image list)"
default = "Canonical"
}
variable "image_offer" {
description = "the name of the offer (az vm image list)"
default = "UbuntuServer"
}
variable "image_sku" {
description = "image sku to apply (az vm image list)"
default = "16.04-LTS"
}
variable "image_version" {
description = "version of the image to apply (az vm image list)"
default = "latest"
}
variable "admin_username" {
description = "administrator user name"
default = "vmadmin"
}
variable "admin_password" {
description = "administrator password (recommended to disable password auth)"
}


@ -1,35 +0,0 @@
# Create a specialized virtual machine in an existing virtual network [![Build Status](https://travis-ci.org/harijayms/terraform.svg?branch=topic-201-vm-specialized-vhd-existing-vnet)](https://travis-ci.org/harijayms/terraform)
This Terraform template was based on [this](https://github.com/Azure/azure-quickstart-templates/tree/master/201-vm-specialized-vhd-existing-vnet) Azure Quickstart Template. Changes to the ARM template that may have occurred since the creation of this example may not be reflected in this Terraform template.
## Prerequisites
- A VHD file, already in a storage account, from which to create the VM
- The name of the existing VNET and subnet to which the new virtual machine will connect
- The name of the Resource Group in which the VNET resides
### NOTE
This template creates an additional Standard_GRS storage account for enabling boot diagnostics each time you execute it. To avoid running into storage account limits, it is best to delete that storage account when the VM is deleted.
This template creates a VM from a specialized VHD and lets you connect it to an existing VNET, which can reside in a different Resource Group from the one in which the virtual machine resides, as shown in the sketch below.
_Please note: This deployment template does not create or attach an existing Network Security Group to the virtual machine._
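The key to this is that the network interface in `main.tf` refers to the existing subnet purely by its full Azure resource ID, so the subnet can live in any resource group your credentials can reach. A minimal sketch (the resource and variable names mirror this example's `main.tf`; the ID in the comment is illustrative):

```
resource "azurerm_network_interface" "nic" {
  name                = "nic"
  location            = "${var.location}"
  resource_group_name = "${azurerm_resource_group.rg.name}"

  ip_configuration {
    name = "ipconfig"

    # full resource ID of a subnet that may live in another resource group, e.g.
    # /subscriptions/<sub>/resourceGroups/<existing-rg>/providers/Microsoft.Network/virtualNetworks/<vnet>/subnets/<subnet>
    subnet_id = "${var.existing_subnet_id}"

    private_ip_address_allocation = "Dynamic"
    public_ip_address_id          = "${azurerm_public_ip.pip.id}"
  }
}
```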
## main.tf
The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
## outputs.tf
This data is output when `terraform apply` is called, and can be queried using the `terraform output` command.
## provider.tf
Azure requires that an application be added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create these credentials and populate your `provider.tf` file.
## terraform.tfvars
If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control; instead, you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
## variables.tf
The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
![graph](/examples/azure-vm-specialized-vhd-existing-vnet/graph.png)


@ -1,55 +0,0 @@
#!/bin/bash
set -o errexit -o nounset

# deploy the template via terraform in a docker container
docker run --rm -it \
  -e ARM_CLIENT_ID \
  -e ARM_CLIENT_SECRET \
  -e ARM_SUBSCRIPTION_ID \
  -e ARM_TENANT_ID \
  -v "$(pwd)":/data \
  --workdir=/data \
  --entrypoint "/bin/sh" \
  hashicorp/terraform:light \
  -c "/bin/terraform get; \
/bin/terraform validate; \
/bin/terraform plan -out=out.tfplan \
  -var os_disk_vhd_uri=$EXISTING_LINUX_IMAGE_URI \
  -var hostname=$KEY \
  -var resource_group=$KEY \
  -var existing_vnet_resource_group=$EXISTING_RESOURCE_GROUP \
  -var admin_password=$PASSWORD \
  -var existing_subnet_id=/subscriptions/$ARM_SUBSCRIPTION_ID/resourceGroups/permanent/providers/Microsoft.Network/virtualNetworks/$EXISTING_VIRTUAL_NETWORK_NAME/subnets/$EXISTING_SUBNET_NAME \
  -var existing_subnet_name=$EXISTING_SUBNET_NAME \
  -var existing_virtual_network_name=$EXISTING_VIRTUAL_NETWORK_NAME \
  -var existing_storage_acct=$EXISTING_STORAGE_ACCOUNT_NAME; \
/bin/terraform apply out.tfplan; \
/bin/terraform show;"

# verify the deployed azure resources via azure-cli
docker run --rm -it \
  azuresdk/azure-cli-python:0.2.10 \
  sh -c "az login --service-principal -u $ARM_CLIENT_ID -p $ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID > /dev/null; \
az vm show -g $KEY -n $KEY"

# cleanup deployed azure resources via terraform
docker run --rm -it \
  -e ARM_CLIENT_ID \
  -e ARM_CLIENT_SECRET \
  -e ARM_SUBSCRIPTION_ID \
  -e ARM_TENANT_ID \
  -v "$(pwd)":/data \
  --workdir=/data \
  --entrypoint "/bin/sh" \
  hashicorp/terraform:light \
  -c "/bin/terraform destroy -force \
  -var os_disk_vhd_uri=$EXISTING_LINUX_IMAGE_URI \
  -var hostname=$KEY \
  -var resource_group=$KEY \
  -var existing_vnet_resource_group=$EXISTING_RESOURCE_GROUP \
  -var admin_password=$PASSWORD \
  -var existing_subnet_id=/subscriptions/$ARM_SUBSCRIPTION_ID/resourceGroups/permanent/providers/Microsoft.Network/virtualNetworks/$EXISTING_VIRTUAL_NETWORK_NAME/subnets/$EXISTING_SUBNET_NAME \
  -var existing_subnet_name=$EXISTING_SUBNET_NAME \
  -var existing_virtual_network_name=$EXISTING_VIRTUAL_NETWORK_NAME \
  -var existing_storage_acct=$EXISTING_STORAGE_ACCOUNT_NAME \
  -target=azurerm_resource_group.rg"


@ -1,20 +0,0 @@
#!/bin/bash
set -o errexit -o nounset

if docker -v; then
  # generate a unique string for CI deployment
  export KEY=$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'a-z' | head -c 12)
  export PASSWORD=$KEY$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'A-Z' | head -c 2)$(cat /dev/urandom | env LC_CTYPE=C tr -cd '0-9' | head -c 2)

  # existing resources used by this example's CI deployment
  export EXISTING_RESOURCE_GROUP=permanent
  export EXISTING_LINUX_IMAGE_URI=https://tfpermstor.blob.core.windows.net/vhds/osdisk_fmF5O5MxlR.vhd
  export EXISTING_STORAGE_ACCOUNT_NAME=tfpermstor
  export EXISTING_VIRTUAL_NETWORK_NAME=permanent-vnet
  export EXISTING_SUBNET_NAME=permanent-subnet

  /bin/sh ./deploy.ci.sh
else
  echo "Docker is used to run terraform commands; please install it before running: https://docs.docker.com/docker-for-mac/install/"
fi

Binary file not shown (image, 239 KiB).


@ -1,71 +0,0 @@
# provider "azurerm" {
# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID"
# client_id = "REPLACE-WITH-YOUR-CLIENT-ID"
# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET"
# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID"
# }
resource "azurerm_resource_group" "rg" {
name = "${var.resource_group}"
location = "${var.location}"
}
resource "azurerm_public_ip" "pip" {
name = "PublicIp"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
public_ip_address_allocation = "Dynamic"
domain_name_label = "${var.hostname}"
}
resource "azurerm_network_interface" "nic" {
name = "nic"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
ip_configuration {
name = "ipconfig"
subnet_id = "${var.existing_subnet_id}"
private_ip_address_allocation = "Dynamic"
public_ip_address_id = "${azurerm_public_ip.pip.id}"
}
}
resource "azurerm_storage_account" "stor" {
name = "${var.hostname}"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${var.location}"
account_type = "${var.storage_account_type}"
}
resource "azurerm_virtual_machine" "vm" {
name = "${var.hostname}"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
vm_size = "${var.vm_size}"
network_interface_ids = ["${azurerm_network_interface.nic.id}"]
storage_os_disk {
name = "${var.hostname}osdisk1"
image_uri = "${var.os_disk_vhd_uri}"
vhd_uri = "https://${var.existing_storage_acct}.blob.core.windows.net/${var.existing_vnet_resource_group}-vhds/${var.hostname}osdisk.vhd"
os_type = "${var.os_type}"
caching = "ReadWrite"
create_option = "FromImage"
}
os_profile {
computer_name = "${var.hostname}"
admin_username = "${var.admin_username}"
admin_password = "${var.admin_password}"
}
os_profile_linux_config {
disable_password_authentication = false
}
boot_diagnostics {
enabled = true
storage_uri = "${azurerm_storage_account.stor.primary_blob_endpoint}"
}
}


@ -1,11 +0,0 @@
output "hostname" {
value = "${var.hostname}"
}
output "vm_fqdn" {
value = "${azurerm_public_ip.pip.fqdn}"
}
output "ssh_command" {
value = "ssh ${var.admin_username}@${azurerm_public_ip.pip.fqdn}"
}


@ -1,90 +0,0 @@
variable "resource_group" {
description = "Name of the resource group in which to deploy your new Virtual Machine"
}
variable "existing_vnet_resource_group" {
description = "Name of the existing resource group in which the existing vnet resides"
}
variable "location" {
description = "The location/region where the virtual network resides."
default = "southcentralus"
}
variable "hostname" {
description = "This variable is used in this template to create the domain name label as well as the virtual machine name. Must be unique."
}
variable "os_type" {
description = "Type of OS on the existing vhd. Allowed values: 'windows' or 'linux'."
default = "linux"
}
variable "os_disk_vhd_uri" {
description = "Uri of the existing VHD in ARM standard or premium storage"
}
variable "existing_storage_acct" {
description = "The name of the storage account in which your existing VHD and image reside"
}
variable "existing_virtual_network_name" {
description = "The name for the existing virtual network"
}
variable "existing_subnet_name" {
description = "The name for the existing subnet in the existing virtual network"
}
variable "existing_subnet_id" {
description = "The id for the existing subnet in the existing virtual network"
}
variable "address_space" {
description = "The address space that is used by the virtual network. You can supply more than one address space. Changing this forces a new resource to be created."
default = "10.0.0.0/16"
}
variable "subnet_prefix" {
description = "The address prefix to use for the subnet."
default = "10.0.10.0/24"
}
variable "storage_account_type" {
description = "Defines the type of storage account to be created. Valid options are Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS. Changing this is sometimes valid - see the Azure documentation for more information on which types of accounts can be converted into other types."
default = "Standard_GRS"
}
variable "vm_size" {
description = "Specifies the size of the virtual machine."
default = "Standard_DS1_v2"
}
variable "image_publisher" {
description = "name of the publisher of the image (az vm image list)"
default = "Canonical"
}
variable "image_offer" {
description = "the name of the offer (az vm image list)"
default = "UbuntuServer"
}
variable "image_sku" {
description = "image sku to apply (az vm image list)"
default = "16.04-LTS"
}
variable "image_version" {
description = "version of the image to apply (az vm image list)"
default = "latest"
}
variable "admin_username" {
description = "administrator user name"
default = "vmadmin"
}
variable "admin_password" {
description = "administrator password (recommended to disable password auth)"
}


@ -1,22 +0,0 @@
# Linux VM Scale Set
This template deploys a Linux VM Scale Set with the desired instance count. Once the VMSS is deployed, the user can deploy an application inside each of the VMs, either by logging into the VMs directly or via a [`remote-exec` provisioner](https://www.terraform.io/docs/provisioners/remote-exec.html), as sketched below.
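A minimal sketch of the provisioner approach, assuming password authentication and the NAT pool defined in this example's `main.tf` (which forwards load-balancer ports starting at 50000 to SSH on the instances); the `null_resource` and the inline commands are illustrative only:

```
resource "null_resource" "provision_app" {
  # connect to the first instance through the load balancer's NAT pool
  connection {
    type     = "ssh"
    host     = "${azurerm_public_ip.pip.fqdn}"
    port     = 50000
    user     = "${var.admin_username}"
    password = "${var.admin_password}"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo apt-get update",
      "sudo apt-get install -y nginx",
    ]
  }
}
```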
## main.tf
The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
## outputs.tf
This data is output when `terraform apply` is called, and can be queried using the `terraform output` command.
## provider.tf
You may leave the provider block in the `main.tf`, as it is in this template, or you can create a file called `provider.tf` and add it to your `.gitignore` file.
Azure requires that an application be added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create these credentials and populate your `provider.tf` file.
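Alternatively, the credentials can be supplied through the `ARM_SUBSCRIPTION_ID`, `ARM_CLIENT_ID`, `ARM_CLIENT_SECRET`, and `ARM_TENANT_ID` environment variables, which the `azurerm` provider reads automatically (this is how the CI scripts in these examples pass credentials into the Terraform container); the provider block can then stay empty:

```
# credentials are read from the ARM_* environment variables
provider "azurerm" {}
```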
## terraform.tfvars
If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control; instead, you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
## variables.tf
The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
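For instance, the desired instance count is exposed as an input variable with a default that can be overridden at plan time (a sketch; the description and default shown here are illustrative, not copied from the file):

```
variable "instance_count" {
  description = "Number of VM instances in the scale set"
  default     = "2"
}
```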
![`terraform graph`](/examples/azure-vmss-ubuntu/graph.png)


@ -1,35 +0,0 @@
#!/bin/bash
set -o errexit -o nounset

# deploy the template via terraform in a docker container
docker run --rm -it \
  -e ARM_CLIENT_ID \
  -e ARM_CLIENT_SECRET \
  -e ARM_SUBSCRIPTION_ID \
  -e ARM_TENANT_ID \
  -v "$(pwd)":/data \
  --entrypoint "/bin/sh" \
  hashicorp/terraform:light \
  -c "cd /data; \
/bin/terraform get; \
/bin/terraform validate; \
/bin/terraform plan -out=out.tfplan -var admin_username=$KEY -var hostname=$KEY -var vmss_name=$KEY -var resource_group=$KEY -var admin_password=$PASSWORD; \
/bin/terraform apply out.tfplan"

# verify the deployed azure resources via azure-cli
docker run --rm -it \
  azuresdk/azure-cli-python:0.2.10 \
  sh -c "az login --service-principal -u $ARM_CLIENT_ID -p $ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID > /dev/null; \
az resource list -g $KEY;"

# cleanup deployed azure resources via terraform
docker run --rm -it \
  -e ARM_CLIENT_ID \
  -e ARM_CLIENT_SECRET \
  -e ARM_SUBSCRIPTION_ID \
  -e ARM_TENANT_ID \
  -v "$(pwd)":/data \
  --workdir=/data \
  --entrypoint "/bin/sh" \
  hashicorp/terraform:light \
  -c "/bin/terraform destroy -force -var resource_group=$KEY -var admin_username=$KEY -var hostname=$KEY -var vmss_name=$KEY -var admin_password=$PASSWORD;"


@ -1,15 +0,0 @@
#!/bin/bash
set -o errexit -o nounset

if docker -v; then
  # generate a unique string for CI deployment
  export KEY=$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'a-z' | head -c 12)
  export PASSWORD=$KEY$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'A-Z' | head -c 2)$(cat /dev/urandom | env LC_CTYPE=C tr -cd '0-9' | head -c 2)

  /bin/sh ./deploy.ci.sh
else
  echo "Docker is used to run terraform commands; please install it before running: https://docs.docker.com/docker-for-mac/install/"
fi

Binary file not shown (image, 202 KiB).


@ -1,127 +0,0 @@
# provider "azurerm" {
# subscription_id = "${var.subscription_id}"
# client_id = "${var.client_id}"
# client_secret = "${var.client_secret}"
# tenant_id = "${var.tenant_id}"
# }
resource "azurerm_resource_group" "rg" {
name = "${var.resource_group}"
location = "${var.location}"
}
resource "azurerm_virtual_network" "vnet" {
name = "${var.resource_group}vnet"
location = "${azurerm_resource_group.rg.location}"
address_space = ["10.0.0.0/16"]
resource_group_name = "${azurerm_resource_group.rg.name}"
}
resource "azurerm_subnet" "subnet" {
name = "subnet"
address_prefix = "10.0.0.0/24"
resource_group_name = "${azurerm_resource_group.rg.name}"
virtual_network_name = "${azurerm_virtual_network.vnet.name}"
}
resource "azurerm_public_ip" "pip" {
name = "${var.hostname}-pip"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
public_ip_address_allocation = "Dynamic"
domain_name_label = "${var.hostname}"
}
resource "azurerm_lb" "lb" {
name = "LoadBalancer"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
depends_on = ["azurerm_public_ip.pip"]
frontend_ip_configuration {
name = "LBFrontEnd"
public_ip_address_id = "${azurerm_public_ip.pip.id}"
}
}
resource "azurerm_lb_backend_address_pool" "backlb" {
name = "BackEndAddressPool"
resource_group_name = "${azurerm_resource_group.rg.name}"
loadbalancer_id = "${azurerm_lb.lb.id}"
}
resource "azurerm_lb_nat_pool" "np" {
resource_group_name = "${azurerm_resource_group.rg.name}"
loadbalancer_id = "${azurerm_lb.lb.id}"
name = "NATPool"
protocol = "Tcp"
frontend_port_start = 50000
frontend_port_end = 50119
backend_port = 22
frontend_ip_configuration_name = "LBFrontEnd"
}
resource "azurerm_storage_account" "stor" {
name = "${var.resource_group}stor"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
account_type = "${var.storage_account_type}"
}
resource "azurerm_storage_container" "vhds" {
name = "vhds"
resource_group_name = "${azurerm_resource_group.rg.name}"
storage_account_name = "${azurerm_storage_account.stor.name}"
container_access_type = "blob"
}
resource "azurerm_virtual_machine_scale_set" "scaleset" {
name = "autoscalewad"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
upgrade_policy_mode = "Manual"
overprovision = true
depends_on = ["azurerm_lb.lb", "azurerm_virtual_network.vnet"]
sku {
name = "${var.vm_sku}"
tier = "Standard"
capacity = "${var.instance_count}"
}
os_profile {
computer_name_prefix = "${var.vmss_name}"
admin_username = "${var.admin_username}"
admin_password = "${var.admin_password}"
}
os_profile_linux_config {
disable_password_authentication = false
}
network_profile {
name = "${var.hostname}-nic"
primary = true
ip_configuration {
name = "${var.hostname}ipconfig"
subnet_id = "${azurerm_subnet.subnet.id}"
load_balancer_backend_address_pool_ids = ["${azurerm_lb_backend_address_pool.backlb.id}"]
load_balancer_inbound_nat_rules_ids = ["${element(azurerm_lb_nat_pool.np.*.id, count.index)}"]
}
}
storage_profile_os_disk {
name = "${var.hostname}"
caching = "ReadWrite"
create_option = "FromImage"
vhd_containers = ["${azurerm_storage_account.stor.primary_blob_endpoint}${azurerm_storage_container.vhds.name}"]
}
storage_profile_image_reference {
publisher = "${var.image_publisher}"
offer = "${var.image_offer}"
sku = "${var.ubuntu_os_version}"
version = "latest"
}
}

Some files were not shown because too many files have changed in this diff.