diff --git a/.gitignore b/.gitignore index e852cc3b5..e0b6954f8 100644 --- a/.gitignore +++ b/.gitignore @@ -15,3 +15,5 @@ website/node_modules *.tfstate *.log *.bak +*~ +.*.swp diff --git a/CHANGELOG.md b/CHANGELOG.md index 810a6c172..69c05ee26 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,17 @@ ## 0.3.7 (unreleased) +FEATURES: + + * **New provider: `azure`** - initially just supporting Linux virtual + machines [GH-899] + IMPROVEMENTS: + * **New resources: `google_compute_forwarding_rule`, `google_compute_http_health_check`, + and `google_compute_target_pool`** - Together these provide network-level + load balancing. [GH-588] + * **New resource: `aws_main_route_table_association`** - Manage the main routing table + of a VPC. [GH-918] * core: Formalized the syntax of interpolations and documented it very heavily. * core: Strings in interpolations can now contain further interpolations, @@ -12,8 +22,11 @@ IMPROVEMENTS: * provider/aws: The `aws_db_instance` resource no longer requires both `final_snapshot_identifier` and `skip_final_snapshot`; the presence or absence of the former now implies the latter. [GH-874] + * provider/aws: Avoid unecessary update of `aws_subnet` when + `map_public_ip_on_launch` is not specified in config. [GH-898] * provider/google: Remove "client secrets file", as it's no longer necessary for API authentication [GH-884]. + * provider/google: Expose `self_link` on `google_compute_instance` [GH-906] BUG FIXES: diff --git a/Makefile b/Makefile index fde469c9c..7ea72d97f 100644 --- a/Makefile +++ b/Makefile @@ -32,14 +32,13 @@ testrace: generate # updatedeps installs all the dependencies that Terraform needs to run # and build. updatedeps: - $(eval REF := $(shell sh -c "\ - git symbolic-ref --short HEAD 2>/dev/null \ - || git rev-parse HEAD")) go get -u github.com/mitchellh/gox go get -u golang.org/x/tools/cmd/stringer - go get -u golang.org/x/tools/cmd/vet - go get -f -u -v ./... - git checkout $(REF) + go list ./... 
\ + | xargs go list -f '{{join .Deps "\n"}}' \ + | grep -v github.com/hashicorp/terraform \ + | sort -u \ + | xargs go get -f -u -v # vet runs the Go source code static analysis tool `vet` to find # any common errors. diff --git a/builtin/bins/provider-azure/main.go b/builtin/bins/provider-azure/main.go new file mode 100644 index 000000000..45af21656 --- /dev/null +++ b/builtin/bins/provider-azure/main.go @@ -0,0 +1,12 @@ +package main + +import ( + "github.com/hashicorp/terraform/builtin/providers/azure" + "github.com/hashicorp/terraform/plugin" +) + +func main() { + plugin.Serve(&plugin.ServeOpts{ + ProviderFunc: azure.Provider, + }) +} diff --git a/builtin/bins/provider-azure/main_test.go b/builtin/bins/provider-azure/main_test.go new file mode 100644 index 000000000..06ab7d0f9 --- /dev/null +++ b/builtin/bins/provider-azure/main_test.go @@ -0,0 +1 @@ +package main diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index 3c417d32d..90e43011a 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -45,26 +45,27 @@ func Provider() terraform.ResourceProvider { }, ResourcesMap: map[string]*schema.Resource{ - "aws_autoscaling_group": resourceAwsAutoscalingGroup(), - "aws_db_instance": resourceAwsDbInstance(), - "aws_db_parameter_group": resourceAwsDbParameterGroup(), - "aws_db_security_group": resourceAwsDbSecurityGroup(), - "aws_db_subnet_group": resourceAwsDbSubnetGroup(), - "aws_eip": resourceAwsEip(), - "aws_elb": resourceAwsElb(), - "aws_instance": resourceAwsInstance(), - "aws_internet_gateway": resourceAwsInternetGateway(), - "aws_key_pair": resourceAwsKeyPair(), - "aws_launch_configuration": resourceAwsLaunchConfiguration(), - "aws_network_acl": resourceAwsNetworkAcl(), - "aws_route53_record": resourceAwsRoute53Record(), - "aws_route53_zone": resourceAwsRoute53Zone(), - "aws_route_table": resourceAwsRouteTable(), - "aws_route_table_association": resourceAwsRouteTableAssociation(), - 
"aws_s3_bucket": resourceAwsS3Bucket(), - "aws_security_group": resourceAwsSecurityGroup(), - "aws_subnet": resourceAwsSubnet(), - "aws_vpc": resourceAwsVpc(), + "aws_autoscaling_group": resourceAwsAutoscalingGroup(), + "aws_db_instance": resourceAwsDbInstance(), + "aws_db_parameter_group": resourceAwsDbParameterGroup(), + "aws_db_security_group": resourceAwsDbSecurityGroup(), + "aws_db_subnet_group": resourceAwsDbSubnetGroup(), + "aws_eip": resourceAwsEip(), + "aws_elb": resourceAwsElb(), + "aws_instance": resourceAwsInstance(), + "aws_internet_gateway": resourceAwsInternetGateway(), + "aws_key_pair": resourceAwsKeyPair(), + "aws_launch_configuration": resourceAwsLaunchConfiguration(), + "aws_main_route_table_association": resourceAwsMainRouteTableAssociation(), + "aws_network_acl": resourceAwsNetworkAcl(), + "aws_route53_record": resourceAwsRoute53Record(), + "aws_route53_zone": resourceAwsRoute53Zone(), + "aws_route_table": resourceAwsRouteTable(), + "aws_route_table_association": resourceAwsRouteTableAssociation(), + "aws_s3_bucket": resourceAwsS3Bucket(), + "aws_security_group": resourceAwsSecurityGroup(), + "aws_subnet": resourceAwsSubnet(), + "aws_vpc": resourceAwsVpc(), }, ConfigureFunc: providerConfigure, diff --git a/builtin/providers/aws/resource_aws_elb.go b/builtin/providers/aws/resource_aws_elb.go index 15feae278..b56d47d87 100644 --- a/builtin/providers/aws/resource_aws_elb.go +++ b/builtin/providers/aws/resource_aws_elb.go @@ -119,7 +119,6 @@ func resourceAwsElb() *schema.Resource { "health_check": &schema.Schema{ Type: schema.TypeSet, Optional: true, - ForceNew: true, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -331,6 +330,28 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error { d.SetPartial("cross_zone_load_balancing") } + if d.HasChange("health_check") { + vs := d.Get("health_check").(*schema.Set).List() + if len(vs) > 0 { + check := vs[0].(map[string]interface{}) + 
configureHealthCheckOpts := elb.ConfigureHealthCheck{ + LoadBalancerName: d.Id(), + Check: elb.HealthCheck{ + HealthyThreshold: int64(check["healthy_threshold"].(int)), + UnhealthyThreshold: int64(check["unhealthy_threshold"].(int)), + Interval: int64(check["interval"].(int)), + Target: check["target"].(string), + Timeout: int64(check["timeout"].(int)), + }, + } + _, err := elbconn.ConfigureHealthCheck(&configureHealthCheckOpts) + if err != nil { + return fmt.Errorf("Failure configuring health check: %s", err) + } + d.SetPartial("health_check") + } + } + d.Partial(false) return resourceAwsElbRead(d, meta) } diff --git a/builtin/providers/aws/resource_aws_elb_test.go b/builtin/providers/aws/resource_aws_elb_test.go index cb5be7291..50563565b 100644 --- a/builtin/providers/aws/resource_aws_elb_test.go +++ b/builtin/providers/aws/resource_aws_elb_test.go @@ -152,6 +152,31 @@ func TestAccAWSELB_HealthCheck(t *testing.T) { }, }) } + +func TestAccAWSELBUpdate_HealthCheck(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSELBDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSELBConfigHealthCheck, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "aws_elb.bar", "health_check.3484319807.healthy_threshold", "5"), + ), + }, + resource.TestStep{ + Config: testAccAWSELBConfigHealthCheck_update, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "aws_elb.bar", "health_check.2648756019.healthy_threshold", "10"), + ), + }, + }, + }) +} + func testAccCheckAWSELBDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).elbconn @@ -418,3 +443,25 @@ resource "aws_elb" "bar" { } } ` + +const testAccAWSELBConfigHealthCheck_update = ` +resource "aws_elb" "bar" { + name = "foobar-terraform-test" + availability_zones = ["us-west-2a"] + + listener { + instance_port = 8000 + 
instance_protocol = "http" + lb_port = 80 + lb_protocol = "http" + } + + health_check { + healthy_threshold = 10 + unhealthy_threshold = 5 + target = "HTTP:8000/" + interval = 60 + timeout = 30 + } +} +` diff --git a/builtin/providers/aws/resource_aws_instance_test.go b/builtin/providers/aws/resource_aws_instance_test.go index 73da0197f..7275dde92 100644 --- a/builtin/providers/aws/resource_aws_instance_test.go +++ b/builtin/providers/aws/resource_aws_instance_test.go @@ -180,6 +180,8 @@ func TestAccInstance_tags(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists("aws_instance.foo", &v), testAccCheckTags(&v.Tags, "foo", "bar"), + // Guard against regression of https://github.com/hashicorp/terraform/issues/914 + testAccCheckTags(&v.Tags, "#", ""), ), }, diff --git a/builtin/providers/aws/resource_aws_main_route_table_association.go b/builtin/providers/aws/resource_aws_main_route_table_association.go new file mode 100644 index 000000000..f656f3760 --- /dev/null +++ b/builtin/providers/aws/resource_aws_main_route_table_association.go @@ -0,0 +1,155 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/mitchellh/goamz/ec2" +) + +func resourceAwsMainRouteTableAssociation() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsMainRouteTableAssociationCreate, + Read: resourceAwsMainRouteTableAssociationRead, + Update: resourceAwsMainRouteTableAssociationUpdate, + Delete: resourceAwsMainRouteTableAssociationDelete, + + Schema: map[string]*schema.Schema{ + "vpc_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "route_table_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + // We use this field to record the main route table that is automatically + // created when the VPC is created. 
We need this to be able to "destroy" + // our main route table association, which we do by returning this route + // table to its original place as the Main Route Table for the VPC. + "original_route_table_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsMainRouteTableAssociationCreate(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn + vpcId := d.Get("vpc_id").(string) + routeTableId := d.Get("route_table_id").(string) + + log.Printf("[INFO] Creating main route table association: %s => %s", vpcId, routeTableId) + + mainAssociation, err := findMainRouteTableAssociation(ec2conn, vpcId) + if err != nil { + return err + } + + resp, err := ec2conn.ReassociateRouteTable( + mainAssociation.AssociationId, + routeTableId, + ) + if err != nil { + return err + } + + d.Set("original_route_table_id", mainAssociation.RouteTableId) + d.SetId(resp.AssociationId) + log.Printf("[INFO] New main route table association ID: %s", d.Id()) + + return nil +} + +func resourceAwsMainRouteTableAssociationRead(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn + + mainAssociation, err := findMainRouteTableAssociation( + ec2conn, + d.Get("vpc_id").(string)) + if err != nil { + return err + } + + if mainAssociation.AssociationId != d.Id() { + // It seems it doesn't exist anymore, so clear the ID + d.SetId("") + } + + return nil +} + +// Update is almost exactly like Create, except we want to retain the +// original_route_table_id - this needs to stay recorded as the AWS-created +// table from VPC creation. 
+func resourceAwsMainRouteTableAssociationUpdate(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn + vpcId := d.Get("vpc_id").(string) + routeTableId := d.Get("route_table_id").(string) + + log.Printf("[INFO] Updating main route table association: %s => %s", vpcId, routeTableId) + + resp, err := ec2conn.ReassociateRouteTable(d.Id(), routeTableId) + if err != nil { + return err + } + + d.SetId(resp.AssociationId) + log.Printf("[INFO] New main route table association ID: %s", d.Id()) + + return nil +} + +func resourceAwsMainRouteTableAssociationDelete(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn + vpcId := d.Get("vpc_id").(string) + originalRouteTableId := d.Get("original_route_table_id").(string) + + log.Printf("[INFO] Deleting main route table association by resetting Main Route Table for VPC: %s to its original Route Table: %s", + vpcId, + originalRouteTableId) + + resp, err := ec2conn.ReassociateRouteTable(d.Id(), originalRouteTableId) + if err != nil { + return err + } + + log.Printf("[INFO] Resulting Association ID: %s", resp.AssociationId) + + return nil +} + +func findMainRouteTableAssociation(ec2conn *ec2.EC2, vpcId string) (*ec2.RouteTableAssociation, error) { + mainRouteTable, err := findMainRouteTable(ec2conn, vpcId) + if err != nil { + return nil, err + } + + for _, a := range mainRouteTable.Associations { + if a.Main { + return &a, nil + } + } + return nil, fmt.Errorf("Could not find main routing table association for VPC: %s", vpcId) +} + +func findMainRouteTable(ec2conn *ec2.EC2, vpcId string) (*ec2.RouteTable, error) { + filter := ec2.NewFilter() + filter.Add("association.main", "true") + filter.Add("vpc-id", vpcId) + routeResp, err := ec2conn.DescribeRouteTables(nil, filter) + if err != nil { + return nil, err + } else if len(routeResp.RouteTables) != 1 { + return nil, fmt.Errorf( + "Expected to find a single main routing table for VPC: %s, but found %d", + vpcId, + 
len(routeResp.RouteTables)) + } + + return &routeResp.RouteTables[0], nil +} diff --git a/builtin/providers/aws/resource_aws_main_route_table_association_test.go b/builtin/providers/aws/resource_aws_main_route_table_association_test.go new file mode 100644 index 000000000..937014cae --- /dev/null +++ b/builtin/providers/aws/resource_aws_main_route_table_association_test.go @@ -0,0 +1,148 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSMainRouteTableAssociation(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckMainRouteTableAssociationDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccMainRouteTableAssociationConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckMainRouteTableAssociation( + "aws_main_route_table_association.foo", + "aws_vpc.foo", + "aws_route_table.foo", + ), + ), + }, + resource.TestStep{ + Config: testAccMainRouteTableAssociationConfigUpdate, + Check: resource.ComposeTestCheckFunc( + testAccCheckMainRouteTableAssociation( + "aws_main_route_table_association.foo", + "aws_vpc.foo", + "aws_route_table.bar", + ), + ), + }, + }, + }) +} + +func testAccCheckMainRouteTableAssociationDestroy(s *terraform.State) error { + if len(s.RootModule().Resources) > 0 { + return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources) + } + + return nil +} + +func testAccCheckMainRouteTableAssociation( + mainRouteTableAssociationResource string, + vpcResource string, + routeTableResource string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[mainRouteTableAssociationResource] + if !ok { + return fmt.Errorf("Not found: %s", mainRouteTableAssociationResource) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is 
set") + } + + vpc, ok := s.RootModule().Resources[vpcResource] + if !ok { + return fmt.Errorf("Not found: %s", vpcResource) + } + + conn := testAccProvider.Meta().(*AWSClient).ec2conn + mainAssociation, err := findMainRouteTableAssociation(conn, vpc.Primary.ID) + if err != nil { + return err + } + + if mainAssociation.AssociationId != rs.Primary.ID { + return fmt.Errorf("Found wrong main association: %s", + mainAssociation.AssociationId) + } + + return nil + } +} + +const testAccMainRouteTableAssociationConfig = ` +resource "aws_vpc" "foo" { + cidr_block = "10.1.0.0/16" +} + +resource "aws_subnet" "foo" { + vpc_id = "${aws_vpc.foo.id}" + cidr_block = "10.1.1.0/24" +} + +resource "aws_internet_gateway" "foo" { + vpc_id = "${aws_vpc.foo.id}" +} + +resource "aws_route_table" "foo" { + vpc_id = "${aws_vpc.foo.id}" + route { + cidr_block = "10.0.0.0/8" + gateway_id = "${aws_internet_gateway.foo.id}" + } +} + +resource "aws_main_route_table_association" "foo" { + vpc_id = "${aws_vpc.foo.id}" + route_table_id = "${aws_route_table.foo.id}" +} +` + +const testAccMainRouteTableAssociationConfigUpdate = ` +resource "aws_vpc" "foo" { + cidr_block = "10.1.0.0/16" +} + +resource "aws_subnet" "foo" { + vpc_id = "${aws_vpc.foo.id}" + cidr_block = "10.1.1.0/24" +} + +resource "aws_internet_gateway" "foo" { + vpc_id = "${aws_vpc.foo.id}" +} + +// Need to keep the old route table around when we update the +// main_route_table_association, otherwise Terraform will try to destroy the +// route table too early, and will fail because it's still the main one +resource "aws_route_table" "foo" { + vpc_id = "${aws_vpc.foo.id}" + route { + cidr_block = "10.0.0.0/8" + gateway_id = "${aws_internet_gateway.foo.id}" + } +} + +resource "aws_route_table" "bar" { + vpc_id = "${aws_vpc.foo.id}" + route { + cidr_block = "10.0.0.0/8" + gateway_id = "${aws_internet_gateway.foo.id}" + } +} + +resource "aws_main_route_table_association" "foo" { + vpc_id = "${aws_vpc.foo.id}" + route_table_id = 
"${aws_route_table.bar.id}" +} +` diff --git a/builtin/providers/aws/resource_aws_subnet.go b/builtin/providers/aws/resource_aws_subnet.go index 7bb88f58f..4e11785e7 100644 --- a/builtin/providers/aws/resource_aws_subnet.go +++ b/builtin/providers/aws/resource_aws_subnet.go @@ -41,6 +41,7 @@ func resourceAwsSubnet() *schema.Resource { "map_public_ip_on_launch": &schema.Schema{ Type: schema.TypeBool, Optional: true, + Computed: true, }, "tags": tagsSchema(), diff --git a/builtin/providers/azure/config.go b/builtin/providers/azure/config.go new file mode 100644 index 000000000..4f093d591 --- /dev/null +++ b/builtin/providers/azure/config.go @@ -0,0 +1,30 @@ +package azure + +import ( + "fmt" + "log" + "os" + + azure "github.com/MSOpenTech/azure-sdk-for-go" +) + +type Config struct { + PublishSettingsFile string +} + +func (c *Config) loadAndValidate() error { + if _, err := os.Stat(c.PublishSettingsFile); os.IsNotExist(err) { + return fmt.Errorf( + "Error loading Azure Publish Settings file '%s': %s", + c.PublishSettingsFile, + err) + } + + log.Printf("[INFO] Importing Azure Publish Settings file...") + err := azure.ImportPublishSettingsFile(c.PublishSettingsFile) + if err != nil { + return err + } + + return nil +} diff --git a/builtin/providers/azure/provider.go b/builtin/providers/azure/provider.go new file mode 100644 index 000000000..199491e37 --- /dev/null +++ b/builtin/providers/azure/provider.go @@ -0,0 +1,48 @@ +package azure + +import ( + "os" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +func Provider() terraform.ResourceProvider { + return &schema.Provider{ + Schema: map[string]*schema.Schema{ + "publish_settings_file": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: envDefaultFunc("AZURE_PUBLISH_SETTINGS_FILE"), + }, + }, + + ResourcesMap: map[string]*schema.Resource{ + "azure_virtual_machine": resourceVirtualMachine(), + }, + + ConfigureFunc: providerConfigure, + } +} + 
+func envDefaultFunc(k string) schema.SchemaDefaultFunc { + return func() (interface{}, error) { + if v := os.Getenv(k); v != "" { + return v, nil + } + + return nil, nil + } +} + +func providerConfigure(d *schema.ResourceData) (interface{}, error) { + config := Config{ + PublishSettingsFile: d.Get("publish_settings_file").(string), + } + + if err := config.loadAndValidate(); err != nil { + return nil, err + } + + return &config, nil +} diff --git a/builtin/providers/azure/provider_test.go b/builtin/providers/azure/provider_test.go new file mode 100644 index 000000000..4a40c5301 --- /dev/null +++ b/builtin/providers/azure/provider_test.go @@ -0,0 +1,35 @@ +package azure + +import ( + "os" + "testing" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +var testAccProviders map[string]terraform.ResourceProvider +var testAccProvider *schema.Provider + +func init() { + testAccProvider = Provider().(*schema.Provider) + testAccProviders = map[string]terraform.ResourceProvider{ + "azure": testAccProvider, + } +} + +func TestProvider(t *testing.T) { + if err := Provider().(*schema.Provider).InternalValidate(); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestProvider_impl(t *testing.T) { + var _ terraform.ResourceProvider = Provider() +} + +func testAccPreCheck(t *testing.T) { + if v := os.Getenv("AZURE_PUBLISH_SETTINGS_FILE"); v == "" { + t.Fatal("AZURE_PUBLISH_SETTINGS_FILE must be set for acceptance tests") + } +} diff --git a/builtin/providers/azure/resource_virtual_machine.go b/builtin/providers/azure/resource_virtual_machine.go new file mode 100644 index 000000000..05f6e44ed --- /dev/null +++ b/builtin/providers/azure/resource_virtual_machine.go @@ -0,0 +1,242 @@ +package azure + +import ( + "bytes" + "fmt" + "log" + + "github.com/MSOpenTech/azure-sdk-for-go/clients/hostedServiceClient" + "github.com/MSOpenTech/azure-sdk-for-go/clients/vmClient" + "github.com/hashicorp/terraform/helper/hashcode" + 
"github.com/hashicorp/terraform/helper/schema" +) + +func resourceVirtualMachine() *schema.Resource { + return &schema.Resource{ + Create: resourceVirtualMachineCreate, + Read: resourceVirtualMachineRead, + Delete: resourceVirtualMachineDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "location": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "image": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "size": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "username": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "password": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + ForceNew: true, + }, + + "ssh_public_key_file": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + ForceNew: true, + }, + + "ssh_port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 22, + ForceNew: true, + }, + + "endpoint": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Computed: true, + ForceNew: true, // This can be updatable once we support updates on the resource + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + + "local_port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: resourceVirtualMachineEndpointHash, + }, + + "url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "vip_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func 
resourceVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Creating Azure Virtual Machine Configuration...") + vmConfig, err := vmClient.CreateAzureVMConfiguration( + d.Get("name").(string), + d.Get("size").(string), + d.Get("image").(string), + d.Get("location").(string)) + if err != nil { + return fmt.Errorf("Error creating Azure virtual machine configuration: %s", err) + } + + // Only Linux VMs are supported. If we want to support other VM types, we need to + // grab the image details and based on the OS add the corresponding configuration. + log.Printf("[DEBUG] Adding Azure Linux Provisioning Configuration...") + vmConfig, err = vmClient.AddAzureLinuxProvisioningConfig( + vmConfig, + d.Get("username").(string), + d.Get("password").(string), + d.Get("ssh_public_key_file").(string), + d.Get("ssh_port").(int)) + if err != nil { + return fmt.Errorf("Error adding Azure linux provisioning configuration: %s", err) + } + + if v := d.Get("endpoint").(*schema.Set); v.Len() > 0 { + log.Printf("[DEBUG] Adding Endpoints to the Azure Virtual Machine...") + endpoints := make([]vmClient.InputEndpoint, v.Len()) + for i, v := range v.List() { + m := v.(map[string]interface{}) + endpoint := vmClient.InputEndpoint{} + endpoint.Name = m["name"].(string) + endpoint.Protocol = m["protocol"].(string) + endpoint.Port = m["port"].(int) + endpoint.LocalPort = m["local_port"].(int) + endpoints[i] = endpoint + } + + configSets := vmConfig.ConfigurationSets.ConfigurationSet + if len(configSets) == 0 { + return fmt.Errorf("Azure virtual machine does not have configuration sets") + } + for i := 0; i < len(configSets); i++ { + if configSets[i].ConfigurationSetType != "NetworkConfiguration" { + continue + } + configSets[i].InputEndpoints.InputEndpoint = + append(configSets[i].InputEndpoints.InputEndpoint, endpoints...) 
+ } + } + + log.Printf("[DEBUG] Creating Azure Virtual Machine...") + err = vmClient.CreateAzureVM( + vmConfig, + d.Get("name").(string), + d.Get("location").(string)) + if err != nil { + return fmt.Errorf("Error creating Azure virtual machine: %s", err) + } + + d.SetId(d.Get("name").(string)) + + return resourceVirtualMachineRead(d, meta) +} + +func resourceVirtualMachineRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Getting Azure Virtual Machine Deployment: %s", d.Id()) + VMDeployment, err := vmClient.GetVMDeployment(d.Id(), d.Id()) + if err != nil { + return fmt.Errorf("Error getting Azure virtual machine deployment: %s", err) + } + + d.Set("url", VMDeployment.Url) + + roleInstances := VMDeployment.RoleInstanceList.RoleInstance + if len(roleInstances) == 0 { + return fmt.Errorf("Virtual Machine does not have IP addresses") + } + ipAddress := roleInstances[0].IpAddress + d.Set("ip_address", ipAddress) + + vips := VMDeployment.VirtualIPs.VirtualIP + if len(vips) == 0 { + return fmt.Errorf("Virtual Machine does not have VIP addresses") + } + vip := vips[0].Address + d.Set("vip_address", vip) + + d.SetConnInfo(map[string]string{ + "type": "ssh", + "host": vip, + "user": d.Get("username").(string), + }) + + return nil +} + +func resourceVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Deleting Azure Virtual Machine Deployment: %s", d.Id()) + if err := vmClient.DeleteVMDeployment(d.Id(), d.Id()); err != nil { + return fmt.Errorf("Error deleting Azure virtual machine deployment: %s", err) + } + + log.Printf("[DEBUG] Deleting Azure Hosted Service: %s", d.Id()) + if err := hostedServiceClient.DeleteHostedService(d.Id()); err != nil { + return fmt.Errorf("Error deleting Azure hosted service: %s", err) + } + + d.SetId("") + + return nil +} + +func resourceVirtualMachineEndpointHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", 
m["name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["protocol"].(string))) + buf.WriteString(fmt.Sprintf("%d-", m["port"].(int))) + buf.WriteString(fmt.Sprintf("%d-", m["local_port"].(int))) + + return hashcode.String(buf.String()) +} diff --git a/builtin/providers/azure/resource_virtual_machine_test.go b/builtin/providers/azure/resource_virtual_machine_test.go new file mode 100644 index 000000000..c519383d2 --- /dev/null +++ b/builtin/providers/azure/resource_virtual_machine_test.go @@ -0,0 +1,180 @@ +package azure + +import ( + "fmt" + "math/rand" + "testing" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/MSOpenTech/azure-sdk-for-go/clients/vmClient" +) + +func TestAccAzureVirtualMachine_Basic(t *testing.T) { + var VMDeployment vmClient.VMDeployment + + // The VM name can only be used once globally within azure, + // so we need to generate a random one + rand.Seed(time.Now().UnixNano()) + vmName := fmt.Sprintf("tf-test-vm-%d", rand.Int31()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAzureVirtualMachineDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckAzureVirtualMachineConfig_basic(vmName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAzureVirtualMachineExists("azure_virtual_machine.foobar", &VMDeployment), + testAccCheckAzureVirtualMachineAttributes(&VMDeployment, vmName), + resource.TestCheckResourceAttr( + "azure_virtual_machine.foobar", "name", vmName), + resource.TestCheckResourceAttr( + "azure_virtual_machine.foobar", "location", "West US"), + resource.TestCheckResourceAttr( + "azure_virtual_machine.foobar", "image", "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140724-en-us-30GB"), + resource.TestCheckResourceAttr( + "azure_virtual_machine.foobar", "size", "Basic_A1"), + resource.TestCheckResourceAttr( + 
"azure_virtual_machine.foobar", "username", "foobar"), + ), + }, + }, + }) +} + +func TestAccAzureVirtualMachine_Endpoints(t *testing.T) { + var VMDeployment vmClient.VMDeployment + + // The VM name can only be used once globally within azure, + // so we need to generate a random one + rand.Seed(time.Now().UnixNano()) + vmName := fmt.Sprintf("tf-test-vm-%d", rand.Int31()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAzureVirtualMachineDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckAzureVirtualMachineConfig_endpoints(vmName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAzureVirtualMachineExists("azure_virtual_machine.foobar", &VMDeployment), + testAccCheckAzureVirtualMachineAttributes(&VMDeployment, vmName), + testAccCheckAzureVirtualMachineEndpoint(&VMDeployment, "tcp", 80), + ), + }, + }, + }) +} + +func testAccCheckAzureVirtualMachineDestroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "azure_virtual_machine" { + continue + } + + _, err := vmClient.GetVMDeployment(rs.Primary.ID, rs.Primary.ID) + if err == nil { + return fmt.Errorf("Azure Virtual Machine (%s) still exists", rs.Primary.ID) + } + } + + return nil +} + +func testAccCheckAzureVirtualMachineExists(n string, VMDeployment *vmClient.VMDeployment) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Azure Virtual Machine ID is set") + } + + retrieveVMDeployment, err := vmClient.GetVMDeployment(rs.Primary.ID, rs.Primary.ID) + if err != nil { + return err + } + + if retrieveVMDeployment.Name != rs.Primary.ID { + return fmt.Errorf("Azure Virtual Machine not found %s %s", VMDeployment.Name, rs.Primary.ID) + } + + *VMDeployment = *retrieveVMDeployment + + return nil 
+ } +} + +func testAccCheckAzureVirtualMachineAttributes(VMDeployment *vmClient.VMDeployment, vmName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if VMDeployment.Name != vmName { + return fmt.Errorf("Bad name: %s != %s", VMDeployment.Name, vmName) + } + + return nil + } +} + +func testAccCheckAzureVirtualMachineEndpoint(VMDeployment *vmClient.VMDeployment, protocol string, publicPort int) resource.TestCheckFunc { + return func(s *terraform.State) error { + roleInstances := VMDeployment.RoleInstanceList.RoleInstance + if len(roleInstances) == 0 { + return fmt.Errorf("Azure virtual machine does not have role instances") + } + + for i := 0; i < len(roleInstances); i++ { + instanceEndpoints := roleInstances[i].InstanceEndpoints.InstanceEndpoint + if len(instanceEndpoints) == 0 { + return fmt.Errorf("Azure virtual machine does not have endpoints") + } + endpointFound := 0 + for j := 0; i < len(instanceEndpoints); i++ { + if instanceEndpoints[j].Protocol == protocol && instanceEndpoints[j].PublicPort == publicPort { + endpointFound = 1 + break + } + } + if endpointFound == 0 { + return fmt.Errorf("Azure virtual machine does not have endpoint %s/%d", protocol, publicPort) + } + } + + return nil + } +} + +func testAccCheckAzureVirtualMachineConfig_basic(vmName string) string { + return fmt.Sprintf(` +resource "azure_virtual_machine" "foobar" { + name = "%s" + location = "West US" + image = "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140724-en-us-30GB" + size = "Basic_A1" + username = "foobar" +} +`, vmName) +} + +func testAccCheckAzureVirtualMachineConfig_endpoints(vmName string) string { + return fmt.Sprintf(` +resource "azure_virtual_machine" "foobar" { + name = "%s" + location = "West US" + image = "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140724-en-us-30GB" + size = "Basic_A1" + username = "foobar" + endpoint { + name = "http" + protocol = "tcp" + port = 80 + local_port = 80 + } +} +`, 
vmName) +} diff --git a/builtin/providers/google/provider.go b/builtin/providers/google/provider.go index 3a16dc0a0..37d662eaa 100644 --- a/builtin/providers/google/provider.go +++ b/builtin/providers/google/provider.go @@ -29,12 +29,15 @@ func Provider() terraform.ResourceProvider { }, ResourcesMap: map[string]*schema.Resource{ - "google_compute_address": resourceComputeAddress(), - "google_compute_disk": resourceComputeDisk(), - "google_compute_firewall": resourceComputeFirewall(), - "google_compute_instance": resourceComputeInstance(), - "google_compute_network": resourceComputeNetwork(), - "google_compute_route": resourceComputeRoute(), + "google_compute_address": resourceComputeAddress(), + "google_compute_disk": resourceComputeDisk(), + "google_compute_firewall": resourceComputeFirewall(), + "google_compute_forwarding_rule": resourceComputeForwardingRule(), + "google_compute_http_health_check": resourceComputeHttpHealthCheck(), + "google_compute_instance": resourceComputeInstance(), + "google_compute_network": resourceComputeNetwork(), + "google_compute_route": resourceComputeRoute(), + "google_compute_target_pool": resourceComputeTargetPool(), }, ConfigureFunc: providerConfigure, diff --git a/builtin/providers/google/resource_compute_address.go b/builtin/providers/google/resource_compute_address.go index a8f1ecf0c..98aa838c2 100644 --- a/builtin/providers/google/resource_compute_address.go +++ b/builtin/providers/google/resource_compute_address.go @@ -27,6 +27,12 @@ func resourceComputeAddress() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, } } @@ -90,6 +96,7 @@ func resourceComputeAddressRead(d *schema.ResourceData, meta interface{}) error } d.Set("address", addr.Address) + d.Set("self_link", addr.SelfLink) return nil } @@ -98,6 +105,7 @@ func resourceComputeAddressDelete(d *schema.ResourceData, meta interface{}) erro config := meta.(*Config) // Delete 
the address + log.Printf("[DEBUG] address delete request") op, err := config.clientCompute.Addresses.Delete( config.Project, config.Region, d.Id()).Do() if err != nil { diff --git a/builtin/providers/google/resource_compute_firewall.go b/builtin/providers/google/resource_compute_firewall.go index dfd020cc4..9cbe5b53b 100644 --- a/builtin/providers/google/resource_compute_firewall.go +++ b/builtin/providers/google/resource_compute_firewall.go @@ -26,6 +26,11 @@ func resourceComputeFirewall() *schema.Resource { ForceNew: true, }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "network": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -306,6 +311,7 @@ func resourceFirewall( // Build the firewall parameter return &compute.Firewall{ Name: d.Get("name").(string), + Description: d.Get("description").(string), Network: network.SelfLink, Allowed: allowed, SourceRanges: sourceRanges, diff --git a/builtin/providers/google/resource_compute_firewall_test.go b/builtin/providers/google/resource_compute_firewall_test.go index 58a6fd787..9bb92af20 100644 --- a/builtin/providers/google/resource_compute_firewall_test.go +++ b/builtin/providers/google/resource_compute_firewall_test.go @@ -126,6 +126,7 @@ resource "google_compute_network" "foobar" { resource "google_compute_firewall" "foobar" { name = "terraform-test" + description = "Resource created for Terraform acceptance testing" network = "${google_compute_network.foobar.name}" source_tags = ["foo"] @@ -142,6 +143,7 @@ resource "google_compute_network" "foobar" { resource "google_compute_firewall" "foobar" { name = "terraform-test" + description = "Resource created for Terraform acceptance testing" network = "${google_compute_network.foobar.name}" source_tags = ["foo"] diff --git a/builtin/providers/google/resource_compute_forwarding_rule.go b/builtin/providers/google/resource_compute_forwarding_rule.go new file mode 100644 index 000000000..269ff611c --- /dev/null +++ 
b/builtin/providers/google/resource_compute_forwarding_rule.go @@ -0,0 +1,219 @@ +package google + +import ( + "fmt" + "log" + "time" + + "code.google.com/p/google-api-go-client/compute/v1" + "code.google.com/p/google-api-go-client/googleapi" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeForwardingRule() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeForwardingRuleCreate, + Read: resourceComputeForwardingRuleRead, + Delete: resourceComputeForwardingRuleDelete, + Update: resourceComputeForwardingRuleUpdate, + + Schema: map[string]*schema.Schema{ + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "ip_protocol": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "port_range": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "target": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + }, + } +} + +func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + frule := &compute.ForwardingRule{ + IPAddress: d.Get("ip_address").(string), + IPProtocol: d.Get("ip_protocol").(string), + Description: d.Get("description").(string), + Name: d.Get("name").(string), + PortRange: d.Get("port_range").(string), + Target: d.Get("target").(string), + } + + log.Printf("[DEBUG] ForwardingRule insert request: %#v", frule) + op, err := config.clientCompute.ForwardingRules.Insert( + config.Project, config.Region, frule).Do() + if err != nil { + return fmt.Errorf("Error creating ForwardingRule: %s", err) + 
} + + // It probably maybe worked, so store the ID now + d.SetId(frule.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Region: config.Region, + Project: config.Project, + Type: OperationWaitRegion, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for ForwardingRule to create: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeForwardingRuleRead(d, meta) +} + +func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + d.Partial(true) + + if d.HasChange("target") { + target_name := d.Get("target").(string) + target_ref := &compute.TargetReference{Target: target_name} + op, err := config.clientCompute.ForwardingRules.SetTarget( + config.Project, config.Region, d.Id(), target_ref).Do() + if err != nil { + return fmt.Errorf("Error updating target: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Region: config.Region, + Project: config.Project, + Type: OperationWaitRegion, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for ForwardingRule to update target: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } + d.SetPartial("target") + } + + d.Partial(false) + + return resourceComputeForwardingRuleRead(d, meta) +} + +func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) error 
{ + config := meta.(*Config) + + frule, err := config.clientCompute.ForwardingRules.Get( + config.Project, config.Region, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + + d.Set("ip_address", frule.IPAddress) + d.Set("ip_protocol", frule.IPProtocol) + d.Set("self_link", frule.SelfLink) + + return nil +} + +func resourceComputeForwardingRuleDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the ForwardingRule + log.Printf("[DEBUG] ForwardingRule delete request") + op, err := config.clientCompute.ForwardingRules.Delete( + config.Project, config.Region, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting ForwardingRule: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Region: config.Region, + Project: config.Project, + Type: OperationWaitRegion, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for ForwardingRule to delete: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } + + d.SetId("") + return nil +} + diff --git a/builtin/providers/google/resource_compute_forwarding_rule_test.go b/builtin/providers/google/resource_compute_forwarding_rule_test.go new file mode 100644 index 000000000..c3aa365df --- /dev/null +++ b/builtin/providers/google/resource_compute_forwarding_rule_test.go @@ -0,0 +1,125 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeForwardingRule_basic(t *testing.T) { + + resource.Test(t, 
resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeForwardingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeForwardingRule_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeForwardingRuleExists( + "google_compute_forwarding_rule.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeForwardingRule_ip(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeForwardingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeForwardingRule_ip, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeForwardingRuleExists( + "google_compute_forwarding_rule.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeForwardingRuleDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_forwarding_rule" { + continue + } + + _, err := config.clientCompute.ForwardingRules.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("ForwardingRule still exists") + } + } + + return nil +} + +func testAccCheckComputeForwardingRuleExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.ForwardingRules.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("ForwardingRule not found") + } + + return nil + } +} + +const testAccComputeForwardingRule_basic = ` +resource "google_compute_target_pool" 
"foobar-tp" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "terraform-test" +} +resource "google_compute_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_protocol = "UDP" + name = "terraform-test" + port_range = "80-81" + target = "${google_compute_target_pool.foobar-tp.self_link}" +} +` + +const testAccComputeForwardingRule_ip = ` +resource "google_compute_address" "foo" { + name = "foo" +} +resource "google_compute_target_pool" "foobar-tp" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "terraform-test" +} +resource "google_compute_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_address = "${google_compute_address.foo.address}" + ip_protocol = "TCP" + name = "terraform-test" + port_range = "80-81" + target = "${google_compute_target_pool.foobar-tp.self_link}" +} +` + diff --git a/builtin/providers/google/resource_compute_http_health_check.go b/builtin/providers/google/resource_compute_http_health_check.go new file mode 100644 index 000000000..68a4c1348 --- /dev/null +++ b/builtin/providers/google/resource_compute_http_health_check.go @@ -0,0 +1,279 @@ +package google + +import ( + "fmt" + "log" + "time" + + "code.google.com/p/google-api-go-client/compute/v1" + "code.google.com/p/google-api-go-client/googleapi" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeHttpHealthCheck() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeHttpHealthCheckCreate, + Read: resourceComputeHttpHealthCheckRead, + Delete: resourceComputeHttpHealthCheckDelete, + Update: resourceComputeHttpHealthCheckUpdate, + + Schema: map[string]*schema.Schema{ + "check_interval_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + 
"description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "healthy_threshold": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "host": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "request_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "timeout_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "unhealthy_threshold": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + }, + } +} + +func resourceComputeHttpHealthCheckCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Build the parameter + hchk := &compute.HttpHealthCheck{ + Name: d.Get("name").(string), + } + // Optional things + if v, ok := d.GetOk("description"); ok { + hchk.Description = v.(string) + } + if v, ok := d.GetOk("host"); ok { + hchk.Host = v.(string) + } + if v, ok := d.GetOk("request_path"); ok { + hchk.RequestPath = v.(string) + } + if v, ok := d.GetOk("check_interval_sec"); ok { + hchk.CheckIntervalSec = int64(v.(int)) + } + if v, ok := d.GetOk("health_threshold"); ok { + hchk.HealthyThreshold = int64(v.(int)) + } + if v, ok := d.GetOk("port"); ok { + hchk.Port = int64(v.(int)) + } + if v, ok := d.GetOk("timeout_sec"); ok { + hchk.TimeoutSec = int64(v.(int)) + } + if v, ok := d.GetOk("unhealthy_threshold"); ok { + hchk.UnhealthyThreshold = int64(v.(int)) + } + + log.Printf("[DEBUG] HttpHealthCheck insert request: %#v", hchk) + op, err := config.clientCompute.HttpHealthChecks.Insert( + config.Project, hchk).Do() + if err != nil { + return fmt.Errorf("Error creating HttpHealthCheck: %s", 
err) + } + + // It probably maybe worked, so store the ID now + d.SetId(hchk.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for HttpHealthCheck to create: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeHttpHealthCheckRead(d, meta) +} + +func resourceComputeHttpHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Build the parameter + hchk := &compute.HttpHealthCheck{ + Name: d.Get("name").(string), + } + // Optional things + if v, ok := d.GetOk("description"); ok { + hchk.Description = v.(string) + } + if v, ok := d.GetOk("host"); ok { + hchk.Host = v.(string) + } + if v, ok := d.GetOk("request_path"); ok { + hchk.RequestPath = v.(string) + } + if v, ok := d.GetOk("check_interval_sec"); ok { + hchk.CheckIntervalSec = int64(v.(int)) + } + if v, ok := d.GetOk("health_threshold"); ok { + hchk.HealthyThreshold = int64(v.(int)) + } + if v, ok := d.GetOk("port"); ok { + hchk.Port = int64(v.(int)) + } + if v, ok := d.GetOk("timeout_sec"); ok { + hchk.TimeoutSec = int64(v.(int)) + } + if v, ok := d.GetOk("unhealthy_threshold"); ok { + hchk.UnhealthyThreshold = int64(v.(int)) + } + + log.Printf("[DEBUG] HttpHealthCheck patch request: %#v", hchk) + op, err := config.clientCompute.HttpHealthChecks.Patch( + config.Project, hchk.Name, hchk).Do() + if err != nil { + return fmt.Errorf("Error patching HttpHealthCheck: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(hchk.Name) + + // Wait for the operation to complete + w := 
&OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for HttpHealthCheck to patch: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeHttpHealthCheckRead(d, meta) +} + +func resourceComputeHttpHealthCheckRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + hchk, err := config.clientCompute.HttpHealthChecks.Get( + config.Project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading HttpHealthCheck: %s", err) + } + + d.Set("host", hchk.Host) + d.Set("request_path", hchk.RequestPath) + d.Set("check_interval_sec", hchk.CheckIntervalSec) + d.Set("health_threshold", hchk.HealthyThreshold) + d.Set("port", hchk.Port) + d.Set("timeout_sec", hchk.TimeoutSec) + d.Set("unhealthy_threshold", hchk.UnhealthyThreshold) + d.Set("self_link", hchk.SelfLink) + + return nil +} + +func resourceComputeHttpHealthCheckDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the HttpHealthCheck + op, err := config.clientCompute.HttpHealthChecks.Delete( + config.Project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting HttpHealthCheck: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + 
if err != nil { + return fmt.Errorf("Error waiting for HttpHealthCheck to delete: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/google/resource_compute_http_health_check_test.go b/builtin/providers/google/resource_compute_http_health_check_test.go new file mode 100644 index 000000000..1797e9831 --- /dev/null +++ b/builtin/providers/google/resource_compute_http_health_check_test.go @@ -0,0 +1,85 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeHttpHealthCheck_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHttpHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHttpHealthCheck_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHttpHealthCheckExists( + "google_compute_http_health_check.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeHttpHealthCheckDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_http_health_check" { + continue + } + + _, err := config.clientCompute.HttpHealthChecks.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("HttpHealthCheck still exists") + } + } + + return nil +} + +func testAccCheckComputeHttpHealthCheckExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := 
config.clientCompute.HttpHealthChecks.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("HttpHealthCheck not found") + } + + return nil + } +} + +const testAccComputeHttpHealthCheck_basic = ` +resource "google_compute_http_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + host = "foobar" + name = "terraform-test" + port = "80" + request_path = "/health_check" + timeout_sec = 2 + unhealthy_threshold = 3 +} +` diff --git a/builtin/providers/google/resource_compute_instance.go b/builtin/providers/google/resource_compute_instance.go index e4438d876..578b1a942 100644 --- a/builtin/providers/google/resource_compute_instance.go +++ b/builtin/providers/google/resource_compute_instance.go @@ -75,20 +75,61 @@ func resourceComputeInstance() *schema.Resource { }, }, + "network_interface": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "access_config": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nat_ip": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "network": &schema.Schema{ Type: schema.TypeList, - Required: true, + Optional: true, ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "source": &schema.Schema{ Type: schema.TypeString, Required: true, + ForceNew: true, }, "address": &schema.Schema{ Type: schema.TypeString, Optional: true, + ForceNew: true, }, "name": &schema.Schema{ @@ 
-169,10 +210,42 @@ func resourceComputeInstance() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, } } +func resourceOperationWaitZone( + config *Config, op *compute.Operation, zone string, activity string) error { + + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Zone: zone, + Type: OperationWaitZone, + } + state := w.Conf() + state.Delay = 10 * time.Second + state.Timeout = 10 * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s: %s", activity, err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } + return nil +} + + func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -260,32 +333,80 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err disks = append(disks, &disk) } - // Build up the list of networks networksCount := d.Get("network.#").(int) - networks := make([]*compute.NetworkInterface, 0, networksCount) - for i := 0; i < networksCount; i++ { - prefix := fmt.Sprintf("network.%d", i) - // Load up the name of this network - networkName := d.Get(prefix + ".source").(string) - network, err := config.clientCompute.Networks.Get( - config.Project, networkName).Do() - if err != nil { - return fmt.Errorf( - "Error loading network '%s': %s", - networkName, err) - } + networkInterfacesCount := d.Get("network_interface.#").(int) - // Build the disk - var iface compute.NetworkInterface - iface.AccessConfigs = []*compute.AccessConfig{ - &compute.AccessConfig{ - Type: "ONE_TO_ONE_NAT", - NatIP: d.Get(prefix + ".address").(string), - }, - } - iface.Network = network.SelfLink + if networksCount > 0 && networkInterfacesCount > 0 { + return fmt.Errorf("Error: cannot define both 
networks and network_interfaces.") + } + if networksCount == 0 && networkInterfacesCount == 0 { + return fmt.Errorf("Error: Must define at least one network_interface.") + } - networks = append(networks, &iface) + var networkInterfaces []*compute.NetworkInterface + + if networksCount > 0 { + // TODO: Delete this block when removing network { } + // Build up the list of networkInterfaces + networkInterfaces = make([]*compute.NetworkInterface, 0, networksCount) + for i := 0; i < networksCount; i++ { + prefix := fmt.Sprintf("network.%d", i) + // Load up the name of this network + networkName := d.Get(prefix + ".source").(string) + network, err := config.clientCompute.Networks.Get( + config.Project, networkName).Do() + if err != nil { + return fmt.Errorf( + "Error loading network '%s': %s", + networkName, err) + } + + // Build the networkInterface + var iface compute.NetworkInterface + iface.AccessConfigs = []*compute.AccessConfig{ + &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + NatIP: d.Get(prefix + ".address").(string), + }, + } + iface.Network = network.SelfLink + + networkInterfaces = append(networkInterfaces, &iface) + } + } + + if networkInterfacesCount > 0 { + // Build up the list of networkInterfaces + networkInterfaces = make([]*compute.NetworkInterface, 0, networkInterfacesCount) + for i := 0; i < networkInterfacesCount; i++ { + prefix := fmt.Sprintf("network_interface.%d", i) + // Load up the name of this network_interfac + networkName := d.Get(prefix + ".network").(string) + network, err := config.clientCompute.Networks.Get( + config.Project, networkName).Do() + if err != nil { + return fmt.Errorf( + "Error referencing network '%s': %s", + networkName, err) + } + + // Build the networkInterface + var iface compute.NetworkInterface + iface.Network = network.SelfLink + + // Handle access_config structs + accessConfigsCount := d.Get(prefix + ".access_config.#").(int) + iface.AccessConfigs = make([]*compute.AccessConfig, accessConfigsCount) + for j := 0; j 
< accessConfigsCount; j++ { + acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j) + iface.AccessConfigs[j] = &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + NatIP: d.Get(acPrefix + ".nat_ip").(string), + } + } + + networkInterfaces = append(networkInterfaces, &iface) + } } serviceAccountsCount := d.Get("service_account.#").(int) @@ -316,7 +437,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err MachineType: machineType.SelfLink, Metadata: resourceInstanceMetadata(d), Name: d.Get("name").(string), - NetworkInterfaces: networks, + NetworkInterfaces: networkInterfaces, Tags: resourceInstanceTags(d), ServiceAccounts: serviceAccounts, } @@ -332,28 +453,11 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err d.SetId(instance.Name) // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Zone: zone.Name, - Type: OperationWaitZone, - } - state := w.Conf() - state.Delay = 10 * time.Second - state.Timeout = 10 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for instance to create: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { + waitErr := resourceOperationWaitZone(config, op, zone.Name, "instance to create") + if waitErr != nil { // The resource didn't actually create d.SetId("") - - // Return the error - return OperationError(*op.Error) + return waitErr } return resourceComputeInstanceRead(d, meta) @@ -387,26 +491,85 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error } } + networksCount := d.Get("network.#").(int) + networkInterfacesCount := d.Get("network_interface.#").(int) + + if networksCount > 0 && networkInterfacesCount > 0 { + return fmt.Errorf("Error: cannot define both networks and network_interfaces.") + } + if networksCount == 0 && networkInterfacesCount == 0 { + 
return fmt.Errorf("Error: Must define at least one network_interface.") + } + // Set the networks + // Use the first external IP found for the default connection info. externalIP := "" - for i, iface := range instance.NetworkInterfaces { - prefix := fmt.Sprintf("network.%d", i) - d.Set(prefix+".name", iface.Name) + internalIP := "" + if networksCount > 0 { + // TODO: Remove this when realizing deprecation of .network + for i, iface := range instance.NetworkInterfaces { + prefix := fmt.Sprintf("network.%d", i) + d.Set(prefix+".name", iface.Name) + log.Printf(prefix+".name = %s", iface.Name) - // Use the first external IP found for the default connection info. - natIP := resourceInstanceNatIP(iface) - if externalIP == "" && natIP != "" { - externalIP = natIP + var natIP string + for _, config := range iface.AccessConfigs { + if config.Type == "ONE_TO_ONE_NAT" { + natIP = config.NatIP + break + } + } + + if externalIP == "" && natIP != "" { + externalIP = natIP + } + d.Set(prefix+".external_address", natIP) + + d.Set(prefix+".internal_address", iface.NetworkIP) } - d.Set(prefix+".external_address", natIP) + } - d.Set(prefix+".internal_address", iface.NetworkIP) + if networkInterfacesCount > 0 { + for i, iface := range instance.NetworkInterfaces { + + prefix := fmt.Sprintf("network_interface.%d", i) + d.Set(prefix+".name", iface.Name) + + // The first non-empty ip is left in natIP + var natIP string + for j, config := range iface.AccessConfigs { + acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j) + d.Set(acPrefix+".nat_ip", config.NatIP) + if natIP == "" { + natIP = config.NatIP + } + } + + if externalIP == "" { + externalIP = natIP + } + + d.Set(prefix+".address", iface.NetworkIP) + if internalIP == "" { + internalIP = iface.NetworkIP + } + + + } + } + + // Fall back on internal ip if there is no external ip. 
This makes sense in the situation where + // terraform is being used on a cloud instance and can therefore access the instances it creates + // via their internal ips. + sshIP := externalIP + if sshIP == "" { + sshIP = internalIP } // Initialize the connection info d.SetConnInfo(map[string]string{ "type": "ssh", - "host": externalIP, + "host": sshIP, }) // Set the metadata fingerprint if there is one. @@ -419,12 +582,29 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error d.Set("tags_fingerprint", instance.Tags.Fingerprint) } + d.Set("self_link", instance.SelfLink) + return nil } func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + zone := d.Get("zone").(string) + + instance, err := config.clientCompute.Instances.Get( + config.Project, zone, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading instance: %s", err) + } + // Enable partial mode for the resource since it is possible d.Partial(true) @@ -432,30 +612,15 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if d.HasChange("metadata") { metadata := resourceInstanceMetadata(d) op, err := config.clientCompute.Instances.SetMetadata( - config.Project, d.Get("zone").(string), d.Id(), metadata).Do() + config.Project, zone, d.Id(), metadata).Do() if err != nil { return fmt.Errorf("Error updating metadata: %s", err) } - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Zone: d.Get("zone").(string), - Type: OperationWaitZone, - } - state := w.Conf() - state.Delay = 1 * time.Second - state.Timeout = 5 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for metadata to update: %s", err) - } - op = 
opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + // 1 5 2 + opErr := resourceOperationWaitZone(config, op, zone, "metadata to update") + if opErr != nil { + return opErr } d.SetPartial("metadata") @@ -464,35 +629,80 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if d.HasChange("tags") { tags := resourceInstanceTags(d) op, err := config.clientCompute.Instances.SetTags( - config.Project, d.Get("zone").(string), d.Id(), tags).Do() + config.Project, zone, d.Id(), tags).Do() if err != nil { return fmt.Errorf("Error updating tags: %s", err) } - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Zone: d.Get("zone").(string), - Type: OperationWaitZone, - } - state := w.Conf() - state.Delay = 1 * time.Second - state.Timeout = 5 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for tags to update: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + opErr := resourceOperationWaitZone(config, op, zone, "tags to update") + if opErr != nil { + return opErr } d.SetPartial("tags") } + networkInterfacesCount := d.Get("network_interface.#").(int) + if networkInterfacesCount > 0 { + // Sanity check + if networkInterfacesCount != len(instance.NetworkInterfaces) { + return fmt.Errorf("Instance had unexpected number of network interfaces: %d", len(instance.NetworkInterfaces)) + } + for i := 0; i < networkInterfacesCount; i++ { + prefix := fmt.Sprintf("network_interface.%d", i) + instNetworkInterface := instance.NetworkInterfaces[i] + networkName := d.Get(prefix+".name").(string) + + // TODO: This sanity check is broken by #929, disabled for now (by forcing the equality) + networkName = instNetworkInterface.Name + // Sanity check + if networkName != instNetworkInterface.Name { + 
return fmt.Errorf("Instance networkInterface had unexpected name: %s", instNetworkInterface.Name) + } + + if d.HasChange(prefix+".access_config") { + + // TODO: This code deletes then recreates accessConfigs. This is bad because it may + // leave the machine inaccessible from either ip if the creation part fails (network + // timeout etc). However right now there is a GCE limit of 1 accessConfig so it is + // the only way to do it. In future this should be revised to only change what is + // necessary, and also add before removing. + + // Delete any accessConfig that currently exists in instNetworkInterface + for _, ac := range(instNetworkInterface.AccessConfigs) { + op, err := config.clientCompute.Instances.DeleteAccessConfig( + config.Project, zone, d.Id(), networkName, ac.Name).Do(); + if err != nil { + return fmt.Errorf("Error deleting old access_config: %s", err) + } + opErr := resourceOperationWaitZone(config, op, zone, "old access_config to delete") + if opErr != nil { + return opErr + } + } + + // Create new ones + accessConfigsCount := d.Get(prefix + ".access_config.#").(int) + for j := 0; j < accessConfigsCount; j++ { + acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j) + ac := &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + NatIP: d.Get(acPrefix + ".nat_ip").(string), + } + op, err := config.clientCompute.Instances.AddAccessConfig( + config.Project, zone, d.Id(), networkName, ac).Do(); + if err != nil { + return fmt.Errorf("Error adding new access_config: %s", err) + } + opErr := resourceOperationWaitZone(config, op, zone, "new access_config to add") + if opErr != nil { + return opErr + } + } + } + } + } + // We made it, disable partial mode d.Partial(false) @@ -502,32 +712,16 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - op, err := config.clientCompute.Instances.Delete( - config.Project, 
d.Get("zone").(string), d.Id()).Do() + zone := d.Get("zone").(string) + op, err := config.clientCompute.Instances.Delete(config.Project, zone, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting instance: %s", err) } // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Zone: d.Get("zone").(string), - Type: OperationWaitZone, - } - state := w.Conf() - state.Delay = 5 * time.Second - state.Timeout = 5 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for instance to delete: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + opErr := resourceOperationWaitZone(config, op, zone, "instance to delete") + if opErr != nil { + return opErr } d.SetId("") @@ -579,16 +773,3 @@ func resourceInstanceTags(d *schema.ResourceData) *compute.Tags { return tags } - -// resourceInstanceNatIP acquires the first NatIP with a "ONE_TO_ONE_NAT" type -// in the compute.NetworkInterface's AccessConfigs. 
-func resourceInstanceNatIP(iface *compute.NetworkInterface) (natIP string) { - for _, config := range iface.AccessConfigs { - if config.Type == "ONE_TO_ONE_NAT" { - natIP = config.NatIP - break - } - } - - return natIP -} diff --git a/builtin/providers/google/resource_compute_instance_test.go b/builtin/providers/google/resource_compute_instance_test.go index 424351993..9d16db521 100644 --- a/builtin/providers/google/resource_compute_instance_test.go +++ b/builtin/providers/google/resource_compute_instance_test.go @@ -10,6 +10,28 @@ import ( "github.com/hashicorp/terraform/terraform" ) +func TestAccComputeInstance_basic_deprecated_network(t *testing.T) { + var instance compute.Instance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic_deprecated_network, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTag(&instance, "foo"), + testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true), + ), + }, + }, + }) +} + func TestAccComputeInstance_basic(t *testing.T) { var instance compute.Instance @@ -32,6 +54,50 @@ func TestAccComputeInstance_basic(t *testing.T) { }) } +func TestAccComputeInstance_basic2(t *testing.T) { + var instance compute.Instance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic2, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTag(&instance, "foo"), + 
testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true), + ), + }, + }, + }) +} + +func TestAccComputeInstance_basic3(t *testing.T) { + var instance compute.Instance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic3, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTag(&instance, "foo"), + testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true), + ), + }, + }, + }) +} + func TestAccComputeInstance_IP(t *testing.T) { var instance compute.Instance @@ -45,7 +111,7 @@ func TestAccComputeInstance_IP(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceNetwork(&instance), + testAccCheckComputeInstanceAccessConfigHasIP(&instance), ), }, }, @@ -73,6 +139,35 @@ func TestAccComputeInstance_disks(t *testing.T) { }) } +func TestAccComputeInstance_update_deprecated_network(t *testing.T) { + var instance compute.Instance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic_deprecated_network, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + ), + }, + resource.TestStep{ + Config: testAccComputeInstance_update_deprecated_network, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + 
"google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMetadata( + &instance, "bar", "baz"), + testAccCheckComputeInstanceTag(&instance, "baz"), + ), + }, + }, + }) +} + func TestAccComputeInstance_update(t *testing.T) { var instance compute.Instance @@ -96,6 +191,7 @@ func TestAccComputeInstance_update(t *testing.T) { testAccCheckComputeInstanceMetadata( &instance, "bar", "baz"), testAccCheckComputeInstanceTag(&instance, "baz"), + testAccCheckComputeInstanceAccessConfig(&instance), ), }, }, @@ -173,7 +269,19 @@ func testAccCheckComputeInstanceMetadata( } } -func testAccCheckComputeInstanceNetwork(instance *compute.Instance) resource.TestCheckFunc { +func testAccCheckComputeInstanceAccessConfig(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + if len(i.AccessConfigs) == 0 { + return fmt.Errorf("no access_config") + } + } + + return nil + } +} + +func testAccCheckComputeInstanceAccessConfigHasIP(instance *compute.Instance) resource.TestCheckFunc { return func(s *terraform.State) error { for _, i := range instance.NetworkInterfaces { for _, c := range i.AccessConfigs { @@ -219,7 +327,7 @@ func testAccCheckComputeInstanceTag(instance *compute.Instance, n string) resour } } -const testAccComputeInstance_basic = ` +const testAccComputeInstance_basic_deprecated_network = ` resource "google_compute_instance" "foobar" { name = "terraform-test" machine_type = "n1-standard-1" @@ -240,6 +348,47 @@ resource "google_compute_instance" "foobar" { } }` +const testAccComputeInstance_update_deprecated_network = ` +resource "google_compute_instance" "foobar" { + name = "terraform-test" + machine_type = "n1-standard-1" + zone = "us-central1-a" + tags = ["baz"] + + disk { + image = "debian-7-wheezy-v20140814" + } + + network { + source = "default" + } + + metadata { + bar = "baz" + } +}` + +const testAccComputeInstance_basic = ` +resource "google_compute_instance" 
"foobar" { + name = "terraform-test" + machine_type = "n1-standard-1" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + image = "debian-7-wheezy-v20140814" + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } +}` + const testAccComputeInstance_basic2 = ` resource "google_compute_instance" "foobar" { name = "terraform-test" @@ -252,10 +401,11 @@ resource "google_compute_instance" "foobar" { image = "debian-cloud/debian-7-wheezy-v20140814" } - network { - source = "default" + network_interface { + network = "default" } + metadata { foo = "bar" } @@ -273,8 +423,8 @@ resource "google_compute_instance" "foobar" { image = "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140814" } - network { - source = "default" + network_interface { + network = "default" } metadata { @@ -282,6 +432,7 @@ resource "google_compute_instance" "foobar" { } }` +// Update metadata, tags, and network_interface const testAccComputeInstance_update = ` resource "google_compute_instance" "foobar" { name = "terraform-test" @@ -293,8 +444,9 @@ resource "google_compute_instance" "foobar" { image = "debian-7-wheezy-v20140814" } - network { - source = "default" + network_interface { + network = "default" + access_config { } } metadata { @@ -317,9 +469,11 @@ resource "google_compute_instance" "foobar" { image = "debian-7-wheezy-v20140814" } - network { - source = "default" - address = "${google_compute_address.foo.address}" + network_interface { + network = "default" + access_config { + nat_ip = "${google_compute_address.foo.address}" + } } metadata { @@ -349,8 +503,8 @@ resource "google_compute_instance" "foobar" { auto_delete = false } - network { - source = "default" + network_interface { + network = "default" } metadata { diff --git a/builtin/providers/google/resource_compute_target_pool.go b/builtin/providers/google/resource_compute_target_pool.go new file mode 100644 index 
000000000..bbf095900 --- /dev/null +++ b/builtin/providers/google/resource_compute_target_pool.go @@ -0,0 +1,404 @@ +package google + +import ( + "fmt" + "log" + "strings" + "time" + + "code.google.com/p/google-api-go-client/compute/v1" + "code.google.com/p/google-api-go-client/googleapi" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeTargetPool() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeTargetPoolCreate, + Read: resourceComputeTargetPoolRead, + Delete: resourceComputeTargetPoolDelete, + Update: resourceComputeTargetPoolUpdate, + + Schema: map[string]*schema.Schema{ + "backup_pool": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "failover_ratio": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + ForceNew: true, + }, + + "health_checks": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "instances": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "session_affinity": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func convertStringArr(ifaceArr []interface{}) []string { + arr := make([]string, len(ifaceArr)) + for i, v := range ifaceArr { + arr[i] = v.(string) + } + return arr +} + +func waitOp(config *Config, op *compute.Operation, + resource string, action string) (*compute.Operation, error) { + + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Region: config.Region, + Project: config.Project, + Type: OperationWaitRegion, + } + state := 
w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return nil, fmt.Errorf("Error waiting for %s to %s: %s", resource, action, err) + } + return opRaw.(*compute.Operation), nil +} + +// Healthchecks need to exist before being referred to from the target pool. +func convertHealthChecks(config *Config, names []string) ([]string, error) { + urls := make([]string, len(names)) + for i, name := range names { + // Look up the healthcheck + res, err := config.clientCompute.HttpHealthChecks.Get(config.Project, name).Do() + if err != nil { + return nil, fmt.Errorf("Error reading HealthCheck: %s", err) + } + urls[i] = res.SelfLink + } + return urls, nil +} + +// Instances do not need to exist yet, so we simply generate URLs. +// Instances can be full URLS or zone/name +func convertInstances(config *Config, names []string) ([]string, error) { + urls := make([]string, len(names)) + for i, name := range names { + if strings.HasPrefix(name, "https://www.googleapis.com/compute/v1/") { + urls[i] = name + } else { + splitName := strings.Split(name, "/") + if len(splitName) != 2 { + return nil, fmt.Errorf("Invalid instance name, require URL or zone/name: %s", name) + } else { + urls[i] = fmt.Sprintf( + "https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s", + config.Project, splitName[0], splitName[1]) + } + } + } + return urls, nil +} + +func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + hchkUrls, err := convertHealthChecks( + config, convertStringArr(d.Get("health_checks").([]interface{}))) + if err != nil { + return err + } + + instanceUrls, err := convertInstances( + config, convertStringArr(d.Get("instances").([]interface{}))) + if err != nil { + return err + } + + // Build the parameter + tpool := &compute.TargetPool{ + BackupPool: d.Get("backup_pool").(string), + Description: d.Get("description").(string), + 
HealthChecks: hchkUrls, + Instances: instanceUrls, + Name: d.Get("name").(string), + SessionAffinity: d.Get("session_affinity").(string), + } + if d.Get("failover_ratio") != nil { + tpool.FailoverRatio = d.Get("failover_ratio").(float64) + } + log.Printf("[DEBUG] TargetPool insert request: %#v", tpool) + op, err := config.clientCompute.TargetPools.Insert( + config.Project, config.Region, tpool).Do() + if err != nil { + return fmt.Errorf("Error creating TargetPool: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(tpool.Name) + + op, err = waitOp(config, op, "TargetPool", "create") + if err != nil { + return err + } + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeTargetPoolRead(d, meta) +} + +func calcAddRemove(from []string, to []string) ([]string, []string) { + add := make([]string, 0) + remove := make([]string, 0) + for _, u := range to { + found := false + for _, v := range from { + if u == v { + found = true + break + } + } + if !found { + add = append(add, u) + } + } + for _, u := range from { + found := false + for _, v := range to { + if u == v { + found = true + break + } + } + if !found { + remove = append(remove, u) + } + } + return add, remove +} + + +func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + d.Partial(true) + + if d.HasChange("health_checks") { + + from_, to_ := d.GetChange("health_checks") + from := convertStringArr(from_.([]interface{})) + to := convertStringArr(to_.([]interface{})) + fromUrls, err := convertHealthChecks(config, from) + if err != nil { + return err + } + toUrls, err := convertHealthChecks(config, to) + if err != nil { + return err + } + add, remove := calcAddRemove(fromUrls, toUrls) + + removeReq := &compute.TargetPoolsRemoveHealthCheckRequest{ + HealthChecks: make([]*compute.HealthCheckReference, len(remove)), + } + 
for i, v := range remove { + removeReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} + } + op, err := config.clientCompute.TargetPools.RemoveHealthCheck( + config.Project, config.Region, d.Id(), removeReq).Do() + if err != nil { + return fmt.Errorf("Error updating health_check: %s", err) + } + op, err = waitOp(config, op, "TargetPool", "removing HealthChecks") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + addReq := &compute.TargetPoolsAddHealthCheckRequest{ + HealthChecks: make([]*compute.HealthCheckReference, len(add)), + } + for i, v := range add { + addReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} + } + op, err = config.clientCompute.TargetPools.AddHealthCheck( + config.Project, config.Region, d.Id(), addReq).Do() + if err != nil { + return fmt.Errorf("Error updating health_check: %s", err) + } + op, err = waitOp(config, op, "TargetPool", "adding HealthChecks") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + d.SetPartial("health_checks") + } + + if d.HasChange("instances") { + + from_, to_ := d.GetChange("instances") + from := convertStringArr(from_.([]interface{})) + to := convertStringArr(to_.([]interface{})) + fromUrls, err := convertInstances(config, from) + if err != nil { + return err + } + toUrls, err := convertInstances(config, to) + if err != nil { + return err + } + add, remove := calcAddRemove(fromUrls, toUrls) + + addReq := &compute.TargetPoolsAddInstanceRequest{ + Instances: make([]*compute.InstanceReference, len(add)), + } + for i, v := range add { + addReq.Instances[i] = &compute.InstanceReference{Instance: v} + } + op, err := config.clientCompute.TargetPools.AddInstance( + config.Project, config.Region, d.Id(), addReq).Do() + if err != nil { + return fmt.Errorf("Error updating instances: %s", err) + } + op, err = waitOp(config, op, "TargetPool", "adding instances") + if err != nil { + return err + } + 
if op.Error != nil { + return OperationError(*op.Error) + } + + removeReq := &compute.TargetPoolsRemoveInstanceRequest{ + Instances: make([]*compute.InstanceReference, len(remove)), + } + for i, v := range remove { + removeReq.Instances[i] = &compute.InstanceReference{Instance: v} + } + op, err = config.clientCompute.TargetPools.RemoveInstance( + config.Project, config.Region, d.Id(), removeReq).Do() + if err != nil { + return fmt.Errorf("Error updating instances: %s", err) + } + op, err = waitOp(config, op, "TargetPool", "removing instances") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + d.SetPartial("instances") + } + + if d.HasChange("backup_pool") { + bpool_name := d.Get("backup_pool").(string) + tref := &compute.TargetReference{ + Target: bpool_name, + } + op, err := config.clientCompute.TargetPools.SetBackup( + config.Project, config.Region, d.Id(), tref).Do() + if err != nil { + return fmt.Errorf("Error updating backup_pool: %s", err) + } + + op, err = waitOp(config, op, "TargetPool", "updating backup_pool") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + d.SetPartial("backup_pool") + } + + d.Partial(false) + + return resourceComputeTargetPoolRead(d, meta) +} + +func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + tpool, err := config.clientCompute.TargetPools.Get( + config.Project, config.Region, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading TargetPool: %s", err) + } + + d.Set("self_link", tpool.SelfLink) + + return nil +} + +func resourceComputeTargetPoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the TargetPool + op, err := config.clientCompute.TargetPools.Delete( + 
config.Project, config.Region, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting TargetPool: %s", err) + } + + op, err = waitOp(config, op, "TargetPool", "delete") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/google/resource_compute_target_pool_test.go b/builtin/providers/google/resource_compute_target_pool_test.go new file mode 100644 index 000000000..4a65eaac6 --- /dev/null +++ b/builtin/providers/google/resource_compute_target_pool_test.go @@ -0,0 +1,80 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeTargetPool_basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeTargetPoolDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeTargetPool_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetPoolExists( + "google_compute_target_pool.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeTargetPoolDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_target_pool" { + continue + } + + _, err := config.clientCompute.TargetPools.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("TargetPool still exists") + } + } + + return nil +} + +func testAccCheckComputeTargetPoolExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := 
config.clientCompute.TargetPools.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("TargetPool not found") + } + + return nil + } +} + +const testAccComputeTargetPool_basic = ` +resource "google_compute_target_pool" "foobar" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "terraform-test" + session_affinity = "CLIENT_IP_PROTO" +}` diff --git a/checkpoint.go b/checkpoint.go index ebe592b32..4837e4763 100644 --- a/checkpoint.go +++ b/checkpoint.go @@ -34,7 +34,7 @@ func runCheckpoint(c *Config) { version := Version if VersionPrerelease != "" { - version += fmt.Sprintf(".%s", VersionPrerelease) + version += fmt.Sprintf("-%s", VersionPrerelease) } signaturePath := filepath.Join(configDir, "checkpoint_signature") diff --git a/command/version.go b/command/version.go index 729f55aa7..d4c3a2f89 100644 --- a/command/version.go +++ b/command/version.go @@ -38,7 +38,7 @@ func (c *VersionCommand) Run(args []string) int { fmt.Fprintf(&versionString, "Terraform v%s", c.Version) if c.VersionPrerelease != "" { - fmt.Fprintf(&versionString, ".%s", c.VersionPrerelease) + fmt.Fprintf(&versionString, "-%s", c.VersionPrerelease) if c.Revision != "" { fmt.Fprintf(&versionString, " (%s)", c.Revision) diff --git a/config/module/detect.go b/config/module/detect.go index f70e69a47..84e1a1d79 100644 --- a/config/module/detect.go +++ b/config/module/detect.go @@ -2,7 +2,6 @@ package module import ( "fmt" - "net/url" "path/filepath" ) @@ -41,7 +40,7 @@ func Detect(src string, pwd string) (string, error) { u, err := urlParse(getSrc) if err == nil && u.Scheme != "" { // Valid URL - return u.String(), nil + return src, nil } for _, d := range Detectors { @@ -67,7 +66,7 @@ func Detect(src string, pwd string) (string, error) { } } if subDir != "" { - u, err := url.Parse(result) + u, err := urlParse(result) if err 
!= nil { return "", fmt.Errorf("Error parsing URL: %s", err) } diff --git a/config/module/detect_test.go b/config/module/detect_test.go index 69a5a6fef..a81bba12b 100644 --- a/config/module/detect_test.go +++ b/config/module/detect_test.go @@ -31,6 +31,12 @@ func TestDetect(t *testing.T) { "git::https://github.com/hashicorp/foo.git//bar", false, }, + { + "git::https://github.com/hashicorp/consul.git", + "", + "git::https://github.com/hashicorp/consul.git", + false, + }, } for i, tc := range cases { diff --git a/config/module/get_hg.go b/config/module/get_hg.go index a979eacfd..666762080 100644 --- a/config/module/get_hg.go +++ b/config/module/get_hg.go @@ -5,6 +5,7 @@ import ( "net/url" "os" "os/exec" + "runtime" ) // HgGetter is a Getter implementation that will download a module from @@ -16,34 +17,40 @@ func (g *HgGetter) Get(dst string, u *url.URL) error { return fmt.Errorf("hg must be available and on the PATH") } + newURL, err := urlParse(u.String()) + if err != nil { + return err + } + if fixWindowsDrivePath(newURL) { + // See valid file path form on http://www.selenic.com/hg/help/urls + newURL.Path = fmt.Sprintf("/%s", newURL.Path) + } + // Extract some query parameters we use var rev string - q := u.Query() + q := newURL.Query() if len(q) > 0 { rev = q.Get("rev") q.Del("rev") - // Copy the URL - var newU url.URL = *u - u = &newU - u.RawQuery = q.Encode() + newURL.RawQuery = q.Encode() } - _, err := os.Stat(dst) + _, err = os.Stat(dst) if err != nil && !os.IsNotExist(err) { return err } if err != nil { - if err := g.clone(dst, u); err != nil { + if err := g.clone(dst, newURL); err != nil { return err } } - if err := g.pull(dst, u); err != nil { + if err := g.pull(dst, newURL); err != nil { return err } - return g.update(dst, u, rev) + return g.update(dst, newURL, rev) } func (g *HgGetter) clone(dst string, u *url.URL) error { @@ -67,3 +74,14 @@ func (g *HgGetter) update(dst string, u *url.URL, rev string) error { cmd.Dir = dst return getRunCommand(cmd) } + 
+func fixWindowsDrivePath(u *url.URL) bool { + // hg assumes a file:/// prefix for Windows drive letter file paths. + // (e.g. file:///c:/foo/bar) + // If the URL Path does not begin with a '/' character, the resulting URL + // path will have a file:// prefix. (e.g. file://c:/foo/bar) + // See http://www.selenic.com/hg/help/urls and the examples listed in + // http://selenic.com/repo/hg-stable/file/1265a3a71d75/mercurial/util.py#l1936 + return runtime.GOOS == "windows" && u.Scheme == "file" && + len(u.Path) > 1 && u.Path[0] != '/' && u.Path[1] == ':' +} diff --git a/config/module/url_helper.go b/config/module/url_helper.go index 58c4a8967..792761927 100644 --- a/config/module/url_helper.go +++ b/config/module/url_helper.go @@ -1,55 +1,63 @@ -package module - -import ( - "fmt" - "net/url" - "path/filepath" - "runtime" -) - -func urlParse(rawURL string) (*url.URL, error) { - if runtime.GOOS == "windows" { - if len(rawURL) > 1 && rawURL[1] == ':' { - // Assume we're dealing with a file path. - rawURL = fmtFileURL(rawURL) - } else { - // Make sure we're using "/" on Windows. URLs are "/"-based. - rawURL = filepath.ToSlash(rawURL) - } - } - u, err := url.Parse(rawURL) - if err != nil { - return nil, err - } - - if runtime.GOOS != "windows" { - return u, err - } - - if u.Scheme != "file" { - return u, err - } - - // Remove leading slash for absolute file paths on Windows. - // For example, url.Parse yields u.Path = "/C:/Users/user" for - // rawurl = "file:///C:/Users/user", which is an incorrect syntax. - if len(u.Path) > 2 && u.Path[0] == '/' && u.Path[2] == ':' { - u.Path = u.Path[1:] - } - - return u, err -} - -func fmtFileURL(path string) string { - if runtime.GOOS == "windows" { - // Make sure we're using "/" on Windows. URLs are "/"-based. - path = filepath.ToSlash(path) - } - - // Make sure that we don't start with "/" since we add that below. 
- if path[0] == '/' { - path = path[1:] - } - - return fmt.Sprintf("file:///%s", path) -} +package module + +import ( + "fmt" + "net/url" + "path/filepath" + "runtime" + "strings" +) + +func urlParse(rawURL string) (*url.URL, error) { + if runtime.GOOS == "windows" { + // Make sure we're using "/" on Windows. URLs are "/"-based. + rawURL = filepath.ToSlash(rawURL) + } + u, err := url.Parse(rawURL) + if err != nil { + return nil, err + } + + if runtime.GOOS != "windows" { + return u, err + } + + if len(rawURL) > 1 && rawURL[1] == ':' { + // Assume we're dealing with a drive letter file path on Windows. + // We need to adjust the URL Path for drive letter file paths + // because url.Parse("c:/users/user") yields URL Scheme = "c" + // and URL path = "/users/user". + u.Path = fmt.Sprintf("%s:%s", u.Scheme, u.Path) + u.Scheme = "" + } + + if len(u.Host) > 1 && u.Host[1] == ':' && strings.HasPrefix(rawURL, "file://") { + // Assume we're dealing with a drive letter file path on Windows + // where the drive letter has been parsed into the URL Host. + u.Path = fmt.Sprintf("%s%s", u.Host, u.Path) + u.Host = "" + } + + // Remove leading slash for absolute file paths on Windows. + // For example, url.Parse yields u.Path = "/C:/Users/user" for + // rawURL = "file:///C:/Users/user", which is an incorrect syntax. + if len(u.Path) > 2 && u.Path[0] == '/' && u.Path[2] == ':' { + u.Path = u.Path[1:] + } + + return u, err +} + +func fmtFileURL(path string) string { + if runtime.GOOS == "windows" { + // Make sure we're using "/" on Windows. URLs are "/"-based. + path = filepath.ToSlash(path) + } + + // Make sure that we don't start with "/" since we add that below. 
+ if path[0] == '/' { + path = path[1:] + } + + return fmt.Sprintf("file:///%s", path) +} diff --git a/examples/aws-count/outputs.tf b/examples/aws-count/outputs.tf index fd703a8e2..96e7fd52b 100644 --- a/examples/aws-count/outputs.tf +++ b/examples/aws-count/outputs.tf @@ -1,3 +1,3 @@ output "address" { - value = "Instances: ${aws_instance.web.*.id}" + value = "Instances: ${element(aws_instance.web.*.id, 0)}" } diff --git a/helper/schema/field_reader_diff.go b/helper/schema/field_reader_diff.go index aaacd5d68..ec875421b 100644 --- a/helper/schema/field_reader_diff.go +++ b/helper/schema/field_reader_diff.go @@ -82,6 +82,11 @@ func (r *DiffFieldReader) readMap( if !strings.HasPrefix(k, prefix) { continue } + if strings.HasPrefix(k, prefix+"#") { + // Ignore the count field + continue + } + resultSet = true k = k[len(prefix):] @@ -148,8 +153,8 @@ func (r *DiffFieldReader) readSet( if !strings.HasPrefix(k, prefix) { continue } - if strings.HasPrefix(k, prefix+"#") { - // Ignore the count field + if strings.HasSuffix(k, "#") { + // Ignore any count field continue } diff --git a/helper/schema/field_reader_diff_test.go b/helper/schema/field_reader_diff_test.go index cc07bc013..fbb10fcaf 100644 --- a/helper/schema/field_reader_diff_test.go +++ b/helper/schema/field_reader_diff_test.go @@ -11,6 +11,51 @@ func TestDiffFieldReader_impl(t *testing.T) { var _ FieldReader = new(DiffFieldReader) } +// https://github.com/hashicorp/terraform/issues/914 +func TestDiffFieldReader_MapHandling(t *testing.T) { + schema := map[string]*Schema{ + "tags": &Schema{ + Type: TypeMap, + }, + } + r := &DiffFieldReader{ + Schema: schema, + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "tags.#": &terraform.ResourceAttrDiff{ + Old: "1", + New: "2", + }, + "tags.baz": &terraform.ResourceAttrDiff{ + Old: "", + New: "qux", + }, + }, + }, + Source: &MapFieldReader{ + Schema: schema, + Map: BasicMapReader(map[string]string{ + "tags.#": "1", + "tags.foo": 
"bar", + }), + }, + } + + result, err := r.ReadField([]string{"tags"}) + if err != nil { + t.Fatalf("ReadField failed: %#v", err) + } + + expected := map[string]interface{}{ + "foo": "bar", + "baz": "qux", + } + + if !reflect.DeepEqual(expected, result.Value) { + t.Fatalf("bad: DiffHandling\n\nexpected: %#v\n\ngot: %#v\n\n", expected, result.Value) + } +} + func TestDiffFieldReader_extra(t *testing.T) { schema := map[string]*Schema{ "stringComputed": &Schema{Type: TypeString}, diff --git a/helper/schema/schema.go b/helper/schema/schema.go index f16df2d72..fc52548a4 100644 --- a/helper/schema/schema.go +++ b/helper/schema/schema.go @@ -11,8 +11,6 @@ // A good starting point is to view the Provider structure. package schema -//go:generate stringer -type=ValueType - import ( "fmt" "os" @@ -25,47 +23,6 @@ import ( "github.com/mitchellh/mapstructure" ) -// ValueType is an enum of the type that can be represented by a schema. -type ValueType int - -const ( - TypeInvalid ValueType = iota - TypeBool - TypeInt - TypeFloat - TypeString - TypeList - TypeMap - TypeSet - typeObject -) - -// Zero returns the zero value for a type. -func (t ValueType) Zero() interface{} { - switch t { - case TypeInvalid: - return nil - case TypeBool: - return false - case TypeInt: - return 0 - case TypeFloat: - return 0.0 - case TypeString: - return "" - case TypeList: - return []interface{}{} - case TypeMap: - return map[string]interface{}{} - case TypeSet: - return nil - case typeObject: - return map[string]interface{}{} - default: - panic(fmt.Sprintf("unknown type %s", t)) - } -} - // Schema is used to describe the structure of a value. // // Read the documentation of the struct elements for important details. 
diff --git a/helper/schema/valuetype.go b/helper/schema/valuetype.go new file mode 100644 index 000000000..b7b7ac810 --- /dev/null +++ b/helper/schema/valuetype.go @@ -0,0 +1,46 @@ +package schema + +//go:generate stringer -type=ValueType valuetype.go + +import "fmt" + +// ValueType is an enum of the type that can be represented by a schema. +type ValueType int + +const ( + TypeInvalid ValueType = iota + TypeBool + TypeInt + TypeFloat + TypeString + TypeList + TypeMap + TypeSet + typeObject +) + +// Zero returns the zero value for a type. +func (t ValueType) Zero() interface{} { + switch t { + case TypeInvalid: + return nil + case TypeBool: + return false + case TypeInt: + return 0 + case TypeFloat: + return 0.0 + case TypeString: + return "" + case TypeList: + return []interface{}{} + case TypeMap: + return map[string]interface{}{} + case TypeSet: + return nil + case typeObject: + return map[string]interface{}{} + default: + panic(fmt.Sprintf("unknown type %s", t)) + } +} diff --git a/helper/schema/valuetype_string.go b/helper/schema/valuetype_string.go index c011d3ec2..fec00944e 100644 --- a/helper/schema/valuetype_string.go +++ b/helper/schema/valuetype_string.go @@ -1,4 +1,4 @@ -// generated by stringer -type=ValueType; DO NOT EDIT +// generated by stringer -type=ValueType valuetype.go; DO NOT EDIT package schema diff --git a/website/source/assets/javascripts/app/Engine.js b/website/source/assets/javascripts/app/Engine.js index a918be3b0..e89d61074 100644 --- a/website/source/assets/javascripts/app/Engine.js +++ b/website/source/assets/javascripts/app/Engine.js @@ -68,7 +68,10 @@ Engine = Base.extend({ this.background.className += ' show'; this.canvas.style.opacity = 1; - new Chainable() + // We have to pass the engine into Chainable to + // enable the timers to properly attach to the + // run/render loop + new Chainable(this) .wait(1000) .then(function(){ this.starGeneratorRate = 200; @@ -202,6 +205,13 @@ Engine = Base.extend({ this.now = Date.now() / 1000; 
this.tick = Math.min(this.now - this.last, 0.017); + // We must attach the chainable timer to the engine + // run/render loop or else things can get pretty + // out of wack + if (this.updateChainTimer) { + this.updateChainTimer(this.tick); + } + // Update all particles... may need to be optimized for (p = 0; p < this.particles.length; p++) { this.particles[p].update(this); diff --git a/website/source/assets/javascripts/lib/Chainable.js b/website/source/assets/javascripts/lib/Chainable.js index edb7f1757..dbe51dd1d 100644 --- a/website/source/assets/javascripts/lib/Chainable.js +++ b/website/source/assets/javascripts/lib/Chainable.js @@ -1,12 +1,29 @@ (function(){ -var Chainable = function(){ +var Chainable = function(engine){ + this.engine = engine; this._chain = []; + this._updateTimer = this._updateTimer.bind(this); this._cycle = this._cycle.bind(this); }; Chainable.prototype._running = false; +Chainable.prototype._updateTimer = function(tick){ + this._timer += tick; + if (this._timer >= this._timerMax) { + this.resetTimer(); + this._cycle(); + } +}; + +Chainable.prototype.resetTimer = function(){ + this.engine.updateChainTimer = undefined; + this._timer = 0; + this._timerMax = 0; + return this; +}; + Chainable.prototype.start = function(){ if (this._running || !this._chain.length) { return this; @@ -19,9 +36,8 @@ Chainable.prototype.reset = function(){ if (!this._running) { return this; } - clearTimeout(this._timer); - this._timer = null; - this._chain.length = 0; + this.resetTimer(); + this._timer = 0; this._running = false; return this; }; @@ -40,8 +56,10 @@ Chainable.prototype._cycle = function(){ return this._cycle(); } if (current.type === 'wait') { - clearTimeout(this._timer); - this._timer = setTimeout(this._cycle, current.time || 0); + this.resetTimer(); + // Convert timer to seconds + this._timerMax = current.time / 1000; + this.engine.updateChainTimer = this._updateTimer; current = null; } diff --git a/website/source/assets/stylesheets/_docs.scss 
b/website/source/assets/stylesheets/_docs.scss index a0d2ce807..cb1686a6e 100755 --- a/website/source/assets/stylesheets/_docs.scss +++ b/website/source/assets/stylesheets/_docs.scss @@ -16,6 +16,7 @@ body.layout-heroku, body.layout-mailgun, body.layout-digitalocean, body.layout-aws, +body.layout-azure, body.layout-docs, body.layout-inner, body.layout-downloads, diff --git a/website/source/docs/providers/aws/r/main_route_table_assoc.html.markdown b/website/source/docs/providers/aws/r/main_route_table_assoc.html.markdown new file mode 100644 index 000000000..a89d2ddee --- /dev/null +++ b/website/source/docs/providers/aws/r/main_route_table_assoc.html.markdown @@ -0,0 +1,44 @@ +--- +layout: "aws" +page_title: "AWS: aws_main_route_table_association" +sidebar_current: "docs-aws-resource-main-route-table-assoc" +description: |- + Provides a resource for managing the main routing table of a VPC. +--- + +# aws\_main\_route\_table\_association + +Provides a resource for managing the main routing table of a VPC. + +## Example Usage + +``` +resource "aws_main_route_table_association" "a" { + vpc_id = "${aws_vpc.foo.id}" + route_table_id = "${aws_route_table.bar.id}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `vpc_id` - (Required) The ID of the VPC whose main route table should be set +* `route_table_id` - (Required) The ID of the Route Table to set as the new + main route table for the target VPC + +## Attributes Reference + +The following attributes are exported: + +* `id` - The ID of the Route Table Association +* `original_route_table_id` - Used internally, see __Notes__ below + +## Notes + +On VPC creation, the AWS API always creates an initial Main Route Table. This +resource records the ID of that Route Table under `original_route_table_id`. +The "Delete" action for a `main_route_table_association` consists of resetting +this original table as the Main Route Table for the VPC. 
You'll see this +additional Route Table in the AWS console; it must remain intact in order for +the `main_route_table_association` delete to work properly. diff --git a/website/source/docs/providers/aws/r/vpc.html.markdown b/website/source/docs/providers/aws/r/vpc.html.markdown index f2ab8da16..48e56d340 100644 --- a/website/source/docs/providers/aws/r/vpc.html.markdown +++ b/website/source/docs/providers/aws/r/vpc.html.markdown @@ -53,6 +53,7 @@ The following attributes are exported: * `enable_dns_support` - Whether or not the VPC has DNS support * `enable_dns_hostnames` - Whether or not the VPC has DNS hostname support * `main_route_table_id` - The ID of the main route table associated with - this VPC. + this VPC. Note that you can change a VPC's main route table by using an + [`aws_main_route_table_association`](/docs/providers/aws/r/main_route_table_assoc.html). * `default_network_acl_id` - The ID of the network ACL created by default on VPC creation * `default_security_group_id` - The ID of the security group created by default on VPC creation diff --git a/website/source/docs/providers/azure/index.html.markdown b/website/source/docs/providers/azure/index.html.markdown new file mode 100644 index 000000000..4991ae632 --- /dev/null +++ b/website/source/docs/providers/azure/index.html.markdown @@ -0,0 +1,37 @@ +--- +layout: "azure" +page_title: "Provider: Microsoft Azure" +sidebar_current: "docs-azure-index" +description: |- + The Azure provider is used to interact with Microsoft Azure services. The provider needs to be configured with the proper credentials before it can be used. +--- + +# Azure Provider + +The Azure provider is used to interact with +[Microsoft Azure](http://azure.microsoft.com/). The provider needs +to be configured with the proper credentials before it can be used. + +Use the navigation to the left to read about the available resources. 
+ +## Example Usage + +``` +# Configure the Azure provider +provider "azure" { + publish_settings_file = "account.publishsettings" +} + +# Create a new instance +resource "azure_virtual_machine" "default" { + ... +} +``` + +## Argument Reference + +The following keys can be used to configure the provider. + +* `publish_settings_file` - (Required) Path to the JSON file used to describe + your account settings, downloaded from Microsoft Azure. It must be provided, + but it can also be sourced from the AZURE_PUBLISH_SETTINGS_FILE environment variable. diff --git a/website/source/docs/providers/azure/r/virtual_machine.html.markdown b/website/source/docs/providers/azure/r/virtual_machine.html.markdown new file mode 100644 index 000000000..946f3b11d --- /dev/null +++ b/website/source/docs/providers/azure/r/virtual_machine.html.markdown @@ -0,0 +1,71 @@ +--- +layout: "azure" +page_title: "Azure: azure_virtual_machine" +sidebar_current: "docs-azure-resource-virtual-machine" +description: |- + Manages a Virtual Machine resource within Azure. +--- + +# azure\_virtual\_machine + +Manages a Virtual Machine resource within Azure. + +## Example Usage + +``` +resource "azure_virtual_machine" "default" { + name = "test" + location = "West US" + image = "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140724-en-us-30GB" + size = "Basic_A1" + username = "${var.username}" + password = "${var.password}" + ssh_public_key_file = "${var.azure_ssh_public_key_file}" + endpoint { + name = "http" + protocol = "tcp" + port = 80 + local_port = 80 + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) A name for the virtual machine. It must use between 3 and + 24 lowercase letters and numbers and it must be unique within Azure. + +* `location` - (Required) The location that the virtual machine should be created in. + +* `image` - (Required) An image to be used to create the virtual machine.
+ +* `size` - (Required) Size that you want to use for the virtual machine. + +* `username` - (Required) Name of the account that you will use to administer + the virtual machine. You cannot use root for the user name. + +* `password` - (Optional) Password for the admin account. + +* `ssh_public_key_file` - (Optional) SSH key (PEM format). + +* `ssh_port` - (Optional) SSH port. + +* `endpoint` - (Optional) Can be specified multiple times for each + endpoint rule. Each endpoint block supports fields documented below. + +The `endpoint` block supports: + +* `name` - (Required) The name of the endpoint. +* `protocol` - (Required) The protocol. +* `port` - (Required) The public port. +* `local_port` - (Required) The private port. + +## Attributes Reference + +The following attributes are exported: + +* `url` - The URL for the virtual machine deployment. +* `ip_address` - The internal IP address of the virtual machine. +* `vip_address` - The public Virtual IP address of the virtual machine. diff --git a/website/source/docs/providers/google/r/compute_address.html.markdown b/website/source/docs/providers/google/r/compute_address.html.markdown index 5365fa2b6..c0551c11f 100644 --- a/website/source/docs/providers/google/r/compute_address.html.markdown +++ b/website/source/docs/providers/google/r/compute_address.html.markdown @@ -8,7 +8,10 @@ description: |- # google\_compute\_address -Creates a static IP address resource for Google Compute Engine. +Creates a static IP address resource for Google Compute Engine. For more information see +[the official documentation](https://cloud.google.com/compute/docs/instances-and-network) and +[API](https://cloud.google.com/compute/docs/reference/latest/addresses). + ## Example Usage @@ -31,3 +34,4 @@ The following attributes are exported: * `name` - The name of the resource. * `address` - The IP address that was allocated. +* `self_link` - The URI of the created resource. 
diff --git a/website/source/docs/providers/google/r/compute_firewall.html.markdown b/website/source/docs/providers/google/r/compute_firewall.html.markdown index 638a4bfd2..f0ed797db 100644 --- a/website/source/docs/providers/google/r/compute_firewall.html.markdown +++ b/website/source/docs/providers/google/r/compute_firewall.html.markdown @@ -37,6 +37,8 @@ The following arguments are supported: * `name` - (Required) A unique name for the resource, required by GCE. Changing this forces a new resource to be created. +* `description` - (Optional) Textual description field. + * `network` - (Required) The name of the network to attach this firewall to. * `allow` - (Required) Can be specified multiple times for each allow diff --git a/website/source/docs/providers/google/r/compute_forwarding_rule.html.markdown b/website/source/docs/providers/google/r/compute_forwarding_rule.html.markdown new file mode 100644 index 000000000..9e8313189 --- /dev/null +++ b/website/source/docs/providers/google/r/compute_forwarding_rule.html.markdown @@ -0,0 +1,53 @@ +--- +layout: "google" +page_title: "Google: google_compute_forwarding_rule" +sidebar_current: "docs-google-resource-forwarding_rule" +description: |- + Manages a Forwarding Rule within GCE. +--- + +# google\_compute\_forwarding\_rule + +Manages a Forwarding Rule within GCE. This binds an ip and port range to a target pool. For more +information see [the official +documentation](https://cloud.google.com/compute/docs/load-balancing/network/forwarding-rules) and +[API](https://cloud.google.com/compute/docs/reference/latest/forwardingRules). + +## Example Usage + +``` +resource "google_compute_forwarding_rule" "default" { + name = "test" + target = "${google_compute_target_pool.default.self_link}" + port_range = "80" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `description` - (Optional) Textual description field. + +* `ip_address` - (Optional) The static IP. (if not set, an ephemeral IP is +used).
+ +* `ip_protocol` - (Optional) The IP protocol to route, one of "TCP" "UDP" "AH" "ESP" or "SCTP". (default "TCP"). + +* `name` - (Required) A unique name for the resource, required by GCE. Changing + this forces a new resource to be created. + +* `port_range` - (Optional) A range e.g. "1024-2048" or a single port "1024" +(defaults to all ports!). + +* `target` - URL of target pool. + +## Attributes Reference + +The following attributes are exported: + +* `self_link` - The URL of the created resource. + +* `ip_address` - The IP address that was chosen (or specified). + + diff --git a/website/source/docs/providers/google/r/compute_http_health_check.html.markdown b/website/source/docs/providers/google/r/compute_http_health_check.html.markdown new file mode 100644 index 000000000..4a4cd3481 --- /dev/null +++ b/website/source/docs/providers/google/r/compute_http_health_check.html.markdown @@ -0,0 +1,57 @@ +--- +layout: "google" +page_title: "Google: google_compute_http_health_check" +sidebar_current: "docs-google-resource-http_health_check" +description: |- + Manages an HTTP Health Check within GCE. +--- + +# google\_compute\_http\_health\_check + +Manages an HTTP health check within GCE. This is used to monitor instances +behind load balancers. Timeouts or HTTP errors cause the instance to be +removed from the pool. For more information, see [the official +documentation](https://cloud.google.com/compute/docs/load-balancing/health-checks) +and +[API](https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks). + +## Example Usage + +``` +resource "google_compute_http_health_check" "default" { + name = "test" + request_path = "/health_check" + check_interval_sec = 1 + timeout_sec = 1 +} +``` + +## Argument Reference + +The following arguments are supported: + +* `check_interval_sec` - (Optional) How often to poll each instance (default 5). + +* `description` - (Optional) Textual description field. 
+ +* `healthy_threshold` - (Optional) Consecutive successes required (default 2). + +* `host` - (Optional) HTTP host header field (default instance's public ip). + +* `name` - (Required) A unique name for the resource, required by GCE. + Changing this forces a new resource to be created. + +* `port` - (Optional) TCP port to connect to (default 80). + +* `request_path` - (Optional) URL path to query (default /). + +* `timeout_sec` - (Optional) How long before declaring failure (default 5). + +* `unhealthy_threshold` - (Optional) Consecutive failures required (default 2). + + +## Attributes Reference + +The following attributes are exported: + +* `self_link` - The URL of the created resource. diff --git a/website/source/docs/providers/google/r/compute_instance.html.markdown b/website/source/docs/providers/google/r/compute_instance.html.markdown index a9cc69fb2..5c6cfe027 100644 --- a/website/source/docs/providers/google/r/compute_instance.html.markdown +++ b/website/source/docs/providers/google/r/compute_instance.html.markdown @@ -8,7 +8,11 @@ description: |- # google\_compute\_instance -Manages a VM instance resource within GCE. +Manages a VM instance resource within GCE. For more information see +[the official documentation](https://cloud.google.com/compute/docs/instances) +and +[API](https://cloud.google.com/compute/docs/reference/latest/instances). + ## Example Usage @@ -23,8 +27,11 @@ resource "google_compute_instance" "default" { image = "debian-7-wheezy-v20140814" } - network { - source = "default" + network_interface { + network = "default" + access_config { + // Ephemeral IP + } } metadata { @@ -60,7 +67,11 @@ The following arguments are supported: * `metadata` - (Optional) Metadata key/value pairs to make available from within the instance. -* `network` - (Required) Networks to attach to the instance. This can be +* `network_interface` - (Required) Networks to attach to the instance. This can be + specified multiple times for multiple networks. 
Structure is documented + below. + +* `network` - (DEPRECATED, Required) Networks to attach to the instance. This can be specified multiple times for multiple networks. Structure is documented below. @@ -82,7 +93,22 @@ The `disk` block supports: * `type` - (Optional) The GCE disk type. -The `network` block supports: +The `network_interface` block supports: + +* `network` - (Required) The name of the network to attach this interface to. + +* `access_config` - (Optional) Access configurations, i.e. IPs via which this instance can be + accessed via the Internet. Omit to ensure that the instance is not accessible from the Internet +(this means that ssh provisioners will not work unless the host running Terraform can send traffic to +the instance's network, e.g. via a tunnel or because it is running on another cloud instance on that +network). This block can be repeated multiple times. Structure documented below. + +The `access_config` block supports: + +* `nat_ip` - (Optional) The IP address that will be 1:1 mapped to the instance's network ip. If not + given, one will be generated. + +(DEPRECATED) The `network` block supports: * `source` - (Required) The name of the network to attach this interface to. diff --git a/website/source/docs/providers/google/r/compute_target_pool.html.markdown b/website/source/docs/providers/google/r/compute_target_pool.html.markdown new file mode 100644 index 000000000..1efc5905e --- /dev/null +++ b/website/source/docs/providers/google/r/compute_target_pool.html.markdown @@ -0,0 +1,58 @@ +--- +layout: "google" +page_title: "Google: google_compute_target_pool" +sidebar_current: "docs-google-resource-target_pool" +description: |- + Manages a Target Pool within GCE. +--- + +# google\_compute\_target\_pool + +Manages a Target Pool within GCE. This is a collection of instances used as +target of a network load balancer (Forwarding Rule).
For more information see +[the official +documentation](https://cloud.google.com/compute/docs/load-balancing/network/target-pools) +and [API](https://cloud.google.com/compute/docs/reference/latest/targetPools). + + +## Example Usage + +``` +resource "google_compute_target_pool" "default" { + name = "test" + instances = [ "us-central1-a/myinstance1", "us-central1-b/myinstance2" ] + health_checks = [ "${google_compute_http_health_check.default.name}" ] +} +``` + +## Argument Reference + +The following arguments are supported: + +* `backup_pool` - (Optional) URL to the backup target pool. Must also set + failover\_ratio. + +* `description` - (Optional) Textual description field. + +* `failover_ratio` - (Optional) Ratio (0 to 1) of failed nodes before using the + backup pool (which must also be set). + +* `health_checks` - (Optional) List of zero or one healthcheck names. + +* `instances` - (Optional) List of instances in the pool. They can be given as + URLs, or in the form of "zone/name". Note that the instances need not exist + at the time of target pool creation, so there is no need to use the Terraform + interpolators to create a dependency on the instances from the target pool. + +* `name` - (Required) A unique name for the resource, required by GCE. Changing + this forces a new resource to be created. + +* `session_affinity` - (Optional) How to distribute load. Options are "NONE" (no affinity). "CLIENT\_IP" (hash of the source/dest addresses / ports), and "CLIENT\_IP\_PROTO" also includes the protocol (default "NONE"). + + +## Attributes Reference + +The following attributes are exported: + +* `self_link` - The URL of the created resource. 
+ diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index d79f44580..030192dfd 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -53,6 +53,10 @@ aws_launch_configuration + > + aws_main_route_table_association + + > aws_network_acl diff --git a/website/source/layouts/azure.erb b/website/source/layouts/azure.erb new file mode 100644 index 000000000..918a12469 --- /dev/null +++ b/website/source/layouts/azure.erb @@ -0,0 +1,26 @@ +<% wrap_layout :inner do %> + <% content_for :sidebar do %> + + <% end %> + + <%= yield %> +<% end %> diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index c71ac5a2e..8e07b6104 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -112,6 +112,10 @@ AWS + > + Azure + + > CloudFlare