From 2923261ed7f28799a43ab39a695ba063fb58cc0c Mon Sep 17 00:00:00 2001 From: Ryan Lewon Date: Wed, 28 Jan 2015 21:19:52 -0800 Subject: [PATCH 01/32] Fix for outputs. --- examples/aws-count/outputs.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/aws-count/outputs.tf b/examples/aws-count/outputs.tf index fd703a8e2..96e7fd52b 100644 --- a/examples/aws-count/outputs.tf +++ b/examples/aws-count/outputs.tf @@ -1,3 +1,3 @@ output "address" { - value = "Instances: ${aws_instance.web.*.id}" + value = "Instances: ${element(aws_instance.web.*.id, 0)}" } From 6046647f38971b1796fcdf6e3c53e2f732d77bd9 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Thu, 20 Nov 2014 12:40:17 -0500 Subject: [PATCH 02/32] Multiple gcp improvements and new resources --- .gitignore | 2 + builtin/providers/google/provider.go | 15 +- .../google/resource_compute_address.go | 8 + .../resource_compute_forwarding_rule.go | 219 ++++++++++ .../resource_compute_forwarding_rule_test.go | 125 ++++++ .../resource_compute_http_health_check.go | 260 +++++++++++ ...resource_compute_http_health_check_test.go | 85 ++++ .../google/resource_compute_instance.go | 24 ++ .../google/resource_compute_target_pool.go | 404 ++++++++++++++++++ .../resource_compute_target_pool_test.go | 80 ++++ .../google/r/compute_address.html.markdown | 6 +- .../r/compute_forwarding_rule.html.markdown | 53 +++ .../r/compute_http_health_check.html.markdown | 57 +++ .../google/r/compute_instance.html.markdown | 6 +- .../r/compute_target_pool.html.markdown | 58 +++ 15 files changed, 1394 insertions(+), 8 deletions(-) create mode 100644 builtin/providers/google/resource_compute_forwarding_rule.go create mode 100644 builtin/providers/google/resource_compute_forwarding_rule_test.go create mode 100644 builtin/providers/google/resource_compute_http_health_check.go create mode 100644 builtin/providers/google/resource_compute_http_health_check_test.go create mode 100644 
builtin/providers/google/resource_compute_target_pool.go create mode 100644 builtin/providers/google/resource_compute_target_pool_test.go create mode 100644 website/source/docs/providers/google/r/compute_forwarding_rule.html.markdown create mode 100644 website/source/docs/providers/google/r/compute_http_health_check.html.markdown create mode 100644 website/source/docs/providers/google/r/compute_target_pool.html.markdown diff --git a/.gitignore b/.gitignore index e852cc3b5..e0b6954f8 100644 --- a/.gitignore +++ b/.gitignore @@ -15,3 +15,5 @@ website/node_modules *.tfstate *.log *.bak +*~ +.*.swp diff --git a/builtin/providers/google/provider.go b/builtin/providers/google/provider.go index 3a16dc0a0..37d662eaa 100644 --- a/builtin/providers/google/provider.go +++ b/builtin/providers/google/provider.go @@ -29,12 +29,15 @@ func Provider() terraform.ResourceProvider { }, ResourcesMap: map[string]*schema.Resource{ - "google_compute_address": resourceComputeAddress(), - "google_compute_disk": resourceComputeDisk(), - "google_compute_firewall": resourceComputeFirewall(), - "google_compute_instance": resourceComputeInstance(), - "google_compute_network": resourceComputeNetwork(), - "google_compute_route": resourceComputeRoute(), + "google_compute_address": resourceComputeAddress(), + "google_compute_disk": resourceComputeDisk(), + "google_compute_firewall": resourceComputeFirewall(), + "google_compute_forwarding_rule": resourceComputeForwardingRule(), + "google_compute_http_health_check": resourceComputeHttpHealthCheck(), + "google_compute_instance": resourceComputeInstance(), + "google_compute_network": resourceComputeNetwork(), + "google_compute_route": resourceComputeRoute(), + "google_compute_target_pool": resourceComputeTargetPool(), }, ConfigureFunc: providerConfigure, diff --git a/builtin/providers/google/resource_compute_address.go b/builtin/providers/google/resource_compute_address.go index a8f1ecf0c..98aa838c2 100644 --- 
a/builtin/providers/google/resource_compute_address.go +++ b/builtin/providers/google/resource_compute_address.go @@ -27,6 +27,12 @@ func resourceComputeAddress() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, } } @@ -90,6 +96,7 @@ func resourceComputeAddressRead(d *schema.ResourceData, meta interface{}) error } d.Set("address", addr.Address) + d.Set("self_link", addr.SelfLink) return nil } @@ -98,6 +105,7 @@ func resourceComputeAddressDelete(d *schema.ResourceData, meta interface{}) erro config := meta.(*Config) // Delete the address + log.Printf("[DEBUG] address delete request") op, err := config.clientCompute.Addresses.Delete( config.Project, config.Region, d.Id()).Do() if err != nil { diff --git a/builtin/providers/google/resource_compute_forwarding_rule.go b/builtin/providers/google/resource_compute_forwarding_rule.go new file mode 100644 index 000000000..269ff611c --- /dev/null +++ b/builtin/providers/google/resource_compute_forwarding_rule.go @@ -0,0 +1,219 @@ +package google + +import ( + "fmt" + "log" + "time" + + "code.google.com/p/google-api-go-client/compute/v1" + "code.google.com/p/google-api-go-client/googleapi" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeForwardingRule() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeForwardingRuleCreate, + Read: resourceComputeForwardingRuleRead, + Delete: resourceComputeForwardingRuleDelete, + Update: resourceComputeForwardingRuleUpdate, + + Schema: map[string]*schema.Schema{ + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "ip_protocol": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: 
schema.TypeString, + Required: true, + ForceNew: true, + }, + + "port_range": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "target": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + }, + } +} + +func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + frule := &compute.ForwardingRule{ + IPAddress: d.Get("ip_address").(string), + IPProtocol: d.Get("ip_protocol").(string), + Description: d.Get("description").(string), + Name: d.Get("name").(string), + PortRange: d.Get("port_range").(string), + Target: d.Get("target").(string), + } + + log.Printf("[DEBUG] ForwardingRule insert request: %#v", frule) + op, err := config.clientCompute.ForwardingRules.Insert( + config.Project, config.Region, frule).Do() + if err != nil { + return fmt.Errorf("Error creating ForwardingRule: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(frule.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Region: config.Region, + Project: config.Project, + Type: OperationWaitRegion, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for ForwardingRule to create: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeForwardingRuleRead(d, meta) +} + +func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + d.Partial(true) + + if d.HasChange("target") { + target_name := d.Get("target").(string) + target_ref := 
&compute.TargetReference{Target: target_name} + op, err := config.clientCompute.ForwardingRules.SetTarget( + config.Project, config.Region, d.Id(), target_ref).Do() + if err != nil { + return fmt.Errorf("Error updating target: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Region: config.Region, + Project: config.Project, + Type: OperationWaitRegion, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for ForwardingRule to update target: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } + d.SetPartial("target") + } + + d.Partial(false) + + return resourceComputeForwardingRuleRead(d, meta) +} + +func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + frule, err := config.clientCompute.ForwardingRules.Get( + config.Project, config.Region, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + + d.Set("ip_address", frule.IPAddress) + d.Set("ip_protocol", frule.IPProtocol) + d.Set("self_link", frule.SelfLink) + + return nil +} + +func resourceComputeForwardingRuleDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the ForwardingRule + log.Printf("[DEBUG] ForwardingRule delete request") + op, err := config.clientCompute.ForwardingRules.Delete( + config.Project, config.Region, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting ForwardingRule: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ 
+ Service: config.clientCompute, + Op: op, + Region: config.Region, + Project: config.Project, + Type: OperationWaitRegion, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for ForwardingRule to delete: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } + + d.SetId("") + return nil +} + diff --git a/builtin/providers/google/resource_compute_forwarding_rule_test.go b/builtin/providers/google/resource_compute_forwarding_rule_test.go new file mode 100644 index 000000000..c3aa365df --- /dev/null +++ b/builtin/providers/google/resource_compute_forwarding_rule_test.go @@ -0,0 +1,125 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeForwardingRule_basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeForwardingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeForwardingRule_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeForwardingRuleExists( + "google_compute_forwarding_rule.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeForwardingRule_ip(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeForwardingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeForwardingRule_ip, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeForwardingRuleExists( + "google_compute_forwarding_rule.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeForwardingRuleDestroy(s *terraform.State) error { + config := 
testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_forwarding_rule" { + continue + } + + _, err := config.clientCompute.ForwardingRules.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("ForwardingRule still exists") + } + } + + return nil +} + +func testAccCheckComputeForwardingRuleExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.ForwardingRules.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("ForwardingRule not found") + } + + return nil + } +} + +const testAccComputeForwardingRule_basic = ` +resource "google_compute_target_pool" "foobar-tp" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "terraform-test" +} +resource "google_compute_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_protocol = "UDP" + name = "terraform-test" + port_range = "80-81" + target = "${google_compute_target_pool.foobar-tp.self_link}" +} +` + +const testAccComputeForwardingRule_ip = ` +resource "google_compute_address" "foo" { + name = "foo" +} +resource "google_compute_target_pool" "foobar-tp" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "terraform-test" +} +resource "google_compute_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_address = "${google_compute_address.foo.address}" + ip_protocol = "TCP" + name = 
"terraform-test" + port_range = "80-81" + target = "${google_compute_target_pool.foobar-tp.self_link}" +} +` + diff --git a/builtin/providers/google/resource_compute_http_health_check.go b/builtin/providers/google/resource_compute_http_health_check.go new file mode 100644 index 000000000..f4887641a --- /dev/null +++ b/builtin/providers/google/resource_compute_http_health_check.go @@ -0,0 +1,260 @@ +package google + +import ( + "fmt" + "log" + "time" + + "code.google.com/p/google-api-go-client/compute/v1" + "code.google.com/p/google-api-go-client/googleapi" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeHttpHealthCheck() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeHttpHealthCheckCreate, + Read: resourceComputeHttpHealthCheckRead, + Delete: resourceComputeHttpHealthCheckDelete, + Update: resourceComputeHttpHealthCheckUpdate, + + Schema: map[string]*schema.Schema{ + "check_interval_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + + "healthy_threshold": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + }, + + "host": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + }, + + "request_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "timeout_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + }, + + "unhealthy_threshold": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + }, + }, + } +} + +func resourceComputeHttpHealthCheckCreate(d 
*schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Build the parameter + hchk := &compute.HttpHealthCheck{ + Description: d.Get("description").(string), + Host: d.Get("host").(string), + Name: d.Get("name").(string), + RequestPath: d.Get("request_path").(string), + } + if d.Get("check_interval_sec") != nil { + hchk.CheckIntervalSec = int64(d.Get("check_interval_sec").(int)) + } + if d.Get("health_threshold") != nil { + hchk.HealthyThreshold = int64(d.Get("healthy_threshold").(int)) + } + if d.Get("port") != nil { + hchk.Port = int64(d.Get("port").(int)) + } + if d.Get("timeout") != nil { + hchk.TimeoutSec = int64(d.Get("timeout_sec").(int)) + } + if d.Get("unhealthy_threshold") != nil { + hchk.UnhealthyThreshold = int64(d.Get("unhealthy_threshold").(int)) + } + + log.Printf("[DEBUG] HttpHealthCheck insert request: %#v", hchk) + op, err := config.clientCompute.HttpHealthChecks.Insert( + config.Project, hchk).Do() + if err != nil { + return fmt.Errorf("Error creating HttpHealthCheck: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(hchk.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for HttpHealthCheck to create: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeHttpHealthCheckRead(d, meta) +} + +func resourceComputeHttpHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Build the parameter + hchk := &compute.HttpHealthCheck{ + Description: d.Get("description").(string), + Host: 
d.Get("host").(string), + Name: d.Get("name").(string), + RequestPath: d.Get("request_path").(string), + } + if d.Get("check_interval_sec") != nil { + hchk.CheckIntervalSec = int64(d.Get("check_interval_sec").(int)) + } + if d.Get("health_threshold") != nil { + hchk.HealthyThreshold = int64(d.Get("healthy_threshold").(int)) + } + if d.Get("port") != nil { + hchk.Port = int64(d.Get("port").(int)) + } + if d.Get("timeout") != nil { + hchk.TimeoutSec = int64(d.Get("timeout_sec").(int)) + } + if d.Get("unhealthy_threshold") != nil { + hchk.UnhealthyThreshold = int64(d.Get("unhealthy_threshold").(int)) + } + + log.Printf("[DEBUG] HttpHealthCheck patch request: %#v", hchk) + op, err := config.clientCompute.HttpHealthChecks.Patch( + config.Project, hchk.Name, hchk).Do() + if err != nil { + return fmt.Errorf("Error patching HttpHealthCheck: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(hchk.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for HttpHealthCheck to patch: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeHttpHealthCheckRead(d, meta) +} + +func resourceComputeHttpHealthCheckRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + hchk, err := config.clientCompute.HttpHealthChecks.Get( + config.Project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading HttpHealthCheck: %s", err) + } + 
+ d.Set("self_link", hchk.SelfLink) + + return nil +} + +func resourceComputeHttpHealthCheckDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the HttpHealthCheck + op, err := config.clientCompute.HttpHealthChecks.Delete( + config.Project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting HttpHealthCheck: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for HttpHealthCheck to delete: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/google/resource_compute_http_health_check_test.go b/builtin/providers/google/resource_compute_http_health_check_test.go new file mode 100644 index 000000000..45181a4cd --- /dev/null +++ b/builtin/providers/google/resource_compute_http_health_check_test.go @@ -0,0 +1,85 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeHttpHealthCheck_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHttpHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHttpHealthCheck_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHttpHealthCheckExists( + "google_compute_http_health_check.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeHttpHealthCheckDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, 
rs := range s.RootModule().Resources { + if rs.Type != "google_compute_http_health_check" { + continue + } + + _, err := config.clientCompute.HttpHealthChecks.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("HttpHealthCheck still exists") + } + } + + return nil +} + +func testAccCheckComputeHttpHealthCheckExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.HttpHealthChecks.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("HttpHealthCheck not found") + } + + return nil + } +} + +const testAccComputeHttpHealthCheck_basic = ` +resource "google_compute_http_health_check" "foobar" { + check_interval_sec = 1 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + host = "foobar" + name = "terraform-test" + port = "80" + request_path = "/health_check" + timeout_sec = 2 + unhealthy_threshold = 3 +} +` diff --git a/builtin/providers/google/resource_compute_instance.go b/builtin/providers/google/resource_compute_instance.go index 98e9faf95..33664f013 100644 --- a/builtin/providers/google/resource_compute_instance.go +++ b/builtin/providers/google/resource_compute_instance.go @@ -109,6 +109,30 @@ func resourceComputeInstance() *schema.Resource { }, }, + "service_accounts": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "email": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "scopes": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + 
}, + }, + }, + }, + }, + "can_ip_forward": &schema.Schema{ Type: schema.TypeBool, Optional: true, diff --git a/builtin/providers/google/resource_compute_target_pool.go b/builtin/providers/google/resource_compute_target_pool.go new file mode 100644 index 000000000..bbf095900 --- /dev/null +++ b/builtin/providers/google/resource_compute_target_pool.go @@ -0,0 +1,404 @@ +package google + +import ( + "fmt" + "log" + "strings" + "time" + + "code.google.com/p/google-api-go-client/compute/v1" + "code.google.com/p/google-api-go-client/googleapi" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeTargetPool() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeTargetPoolCreate, + Read: resourceComputeTargetPoolRead, + Delete: resourceComputeTargetPoolDelete, + Update: resourceComputeTargetPoolUpdate, + + Schema: map[string]*schema.Schema{ + "backup_pool": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "failover_ratio": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + ForceNew: true, + }, + + "health_checks": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "instances": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "session_affinity": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func convertStringArr(ifaceArr []interface{}) []string { + arr := make([]string, len(ifaceArr)) + for i, v := range ifaceArr { + arr[i] = v.(string) + } + return arr +} + +func waitOp(config *Config, 
op *compute.Operation, + resource string, action string) (*compute.Operation, error) { + + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Region: config.Region, + Project: config.Project, + Type: OperationWaitRegion, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return nil, fmt.Errorf("Error waiting for %s to %s: %s", resource, action, err) + } + return opRaw.(*compute.Operation), nil +} + +// Healthchecks need to exist before being referred to from the target pool. +func convertHealthChecks(config *Config, names []string) ([]string, error) { + urls := make([]string, len(names)) + for i, name := range names { + // Look up the healthcheck + res, err := config.clientCompute.HttpHealthChecks.Get(config.Project, name).Do() + if err != nil { + return nil, fmt.Errorf("Error reading HealthCheck: %s", err) + } + urls[i] = res.SelfLink + } + return urls, nil +} + +// Instances do not need to exist yet, so we simply generate URLs. 
+// Instances can be full URLS or zone/name +func convertInstances(config *Config, names []string) ([]string, error) { + urls := make([]string, len(names)) + for i, name := range names { + if strings.HasPrefix(name, "https://www.googleapis.com/compute/v1/") { + urls[i] = name + } else { + splitName := strings.Split(name, "/") + if len(splitName) != 2 { + return nil, fmt.Errorf("Invalid instance name, require URL or zone/name: %s", name) + } else { + urls[i] = fmt.Sprintf( + "https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s", + config.Project, splitName[0], splitName[1]) + } + } + } + return urls, nil +} + +func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + hchkUrls, err := convertHealthChecks( + config, convertStringArr(d.Get("health_checks").([]interface{}))) + if err != nil { + return err + } + + instanceUrls, err := convertInstances( + config, convertStringArr(d.Get("instances").([]interface{}))) + if err != nil { + return err + } + + // Build the parameter + tpool := &compute.TargetPool{ + BackupPool: d.Get("backup_pool").(string), + Description: d.Get("description").(string), + HealthChecks: hchkUrls, + Instances: instanceUrls, + Name: d.Get("name").(string), + SessionAffinity: d.Get("session_affinity").(string), + } + if d.Get("failover_ratio") != nil { + tpool.FailoverRatio = d.Get("failover_ratio").(float64) + } + log.Printf("[DEBUG] TargetPool insert request: %#v", tpool) + op, err := config.clientCompute.TargetPools.Insert( + config.Project, config.Region, tpool).Do() + if err != nil { + return fmt.Errorf("Error creating TargetPool: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(tpool.Name) + + op, err = waitOp(config, op, "TargetPool", "create") + if err != nil { + return err + } + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + // Return the error + return OperationError(*op.Error) + } + + return 
resourceComputeTargetPoolRead(d, meta) +} + +func calcAddRemove(from []string, to []string) ([]string, []string) { + add := make([]string, 0) + remove := make([]string, 0) + for _, u := range to { + found := false + for _, v := range from { + if u == v { + found = true + break + } + } + if !found { + add = append(add, u) + } + } + for _, u := range from { + found := false + for _, v := range to { + if u == v { + found = true + break + } + } + if !found { + remove = append(remove, u) + } + } + return add, remove +} + + +func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + d.Partial(true) + + if d.HasChange("health_checks") { + + from_, to_ := d.GetChange("health_checks") + from := convertStringArr(from_.([]interface{})) + to := convertStringArr(to_.([]interface{})) + fromUrls, err := convertHealthChecks(config, from) + if err != nil { + return err + } + toUrls, err := convertHealthChecks(config, to) + if err != nil { + return err + } + add, remove := calcAddRemove(fromUrls, toUrls) + + removeReq := &compute.TargetPoolsRemoveHealthCheckRequest{ + HealthChecks: make([]*compute.HealthCheckReference, len(remove)), + } + for i, v := range remove { + removeReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} + } + op, err := config.clientCompute.TargetPools.RemoveHealthCheck( + config.Project, config.Region, d.Id(), removeReq).Do() + if err != nil { + return fmt.Errorf("Error updating health_check: %s", err) + } + op, err = waitOp(config, op, "TargetPool", "removing HealthChecks") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + addReq := &compute.TargetPoolsAddHealthCheckRequest{ + HealthChecks: make([]*compute.HealthCheckReference, len(add)), + } + for i, v := range add { + addReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} + } + op, err = config.clientCompute.TargetPools.AddHealthCheck( + config.Project, config.Region, 
d.Id(), addReq).Do() + if err != nil { + return fmt.Errorf("Error updating health_check: %s", err) + } + op, err = waitOp(config, op, "TargetPool", "adding HealthChecks") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + d.SetPartial("health_checks") + } + + if d.HasChange("instances") { + + from_, to_ := d.GetChange("instances") + from := convertStringArr(from_.([]interface{})) + to := convertStringArr(to_.([]interface{})) + fromUrls, err := convertInstances(config, from) + if err != nil { + return err + } + toUrls, err := convertInstances(config, to) + if err != nil { + return err + } + add, remove := calcAddRemove(fromUrls, toUrls) + + addReq := &compute.TargetPoolsAddInstanceRequest{ + Instances: make([]*compute.InstanceReference, len(add)), + } + for i, v := range add { + addReq.Instances[i] = &compute.InstanceReference{Instance: v} + } + op, err := config.clientCompute.TargetPools.AddInstance( + config.Project, config.Region, d.Id(), addReq).Do() + if err != nil { + return fmt.Errorf("Error updating instances: %s", err) + } + op, err = waitOp(config, op, "TargetPool", "adding instances") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + removeReq := &compute.TargetPoolsRemoveInstanceRequest{ + Instances: make([]*compute.InstanceReference, len(remove)), + } + for i, v := range remove { + removeReq.Instances[i] = &compute.InstanceReference{Instance: v} + } + op, err = config.clientCompute.TargetPools.RemoveInstance( + config.Project, config.Region, d.Id(), removeReq).Do() + if err != nil { + return fmt.Errorf("Error updating instances: %s", err) + } + op, err = waitOp(config, op, "TargetPool", "removing instances") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + d.SetPartial("instances") + } + + if d.HasChange("backup_pool") { + bpool_name := d.Get("backup_pool").(string) + tref := &compute.TargetReference{ + 
Target: bpool_name, + } + op, err := config.clientCompute.TargetPools.SetBackup( + config.Project, config.Region, d.Id(), tref).Do() + if err != nil { + return fmt.Errorf("Error updating backup_pool: %s", err) + } + + op, err = waitOp(config, op, "TargetPool", "updating backup_pool") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + d.SetPartial("backup_pool") + } + + d.Partial(false) + + return resourceComputeTargetPoolRead(d, meta) +} + +func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + tpool, err := config.clientCompute.TargetPools.Get( + config.Project, config.Region, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading TargetPool: %s", err) + } + + d.Set("self_link", tpool.SelfLink) + + return nil +} + +func resourceComputeTargetPoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the TargetPool + op, err := config.clientCompute.TargetPools.Delete( + config.Project, config.Region, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting TargetPool: %s", err) + } + + op, err = waitOp(config, op, "TargetPool", "delete") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/google/resource_compute_target_pool_test.go b/builtin/providers/google/resource_compute_target_pool_test.go new file mode 100644 index 000000000..4a65eaac6 --- /dev/null +++ b/builtin/providers/google/resource_compute_target_pool_test.go @@ -0,0 +1,80 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeTargetPool_basic(t *testing.T) { + + 
resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeTargetPoolDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeTargetPool_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetPoolExists( + "google_compute_target_pool.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeTargetPoolDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_target_pool" { + continue + } + + _, err := config.clientCompute.TargetPools.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("TargetPool still exists") + } + } + + return nil +} + +func testAccCheckComputeTargetPoolExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.TargetPools.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("TargetPool not found") + } + + return nil + } +} + +const testAccComputeTargetPool_basic = ` +resource "google_compute_target_pool" "foobar" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "terraform-test" + session_affinity = "CLIENT_IP_PROTO" +}` diff --git a/website/source/docs/providers/google/r/compute_address.html.markdown b/website/source/docs/providers/google/r/compute_address.html.markdown index 5365fa2b6..c0551c11f 100644 --- a/website/source/docs/providers/google/r/compute_address.html.markdown +++ 
b/website/source/docs/providers/google/r/compute_address.html.markdown
@@ -8,7 +8,10 @@ description: |-
 
 # google\_compute\_address
 
-Creates a static IP address resource for Google Compute Engine.
+Creates a static IP address resource for Google Compute Engine. For more information see
+[the official documentation](https://cloud.google.com/compute/docs/instances-and-network) and
+[API](https://cloud.google.com/compute/docs/reference/latest/addresses).
+
 
 ## Example Usage
 
@@ -31,3 +34,4 @@ The following attributes are exported:
 
 * `name` - The name of the resource.
 * `address` - The IP address that was allocated.
+* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_forwarding_rule.html.markdown b/website/source/docs/providers/google/r/compute_forwarding_rule.html.markdown
new file mode 100644
index 000000000..9e8313189
--- /dev/null
+++ b/website/source/docs/providers/google/r/compute_forwarding_rule.html.markdown
@@ -0,0 +1,53 @@
+---
+layout: "google"
+page_title: "Google: google_compute_forwarding_rule"
+sidebar_current: "docs-google-resource-forwarding_rule"
+description: |-
+  Manages a Forwarding Rule within GCE.
+---
+
+# google\_compute\_forwarding\_rule
+
+Manages a Forwarding Rule within GCE. This binds an ip and port range to a target pool. For more
+information see [the official
+documentation](https://cloud.google.com/compute/docs/load-balancing/network/forwarding-rules) and
+[API](https://cloud.google.com/compute/docs/reference/latest/forwardingRules).
+
+## Example Usage
+
+```
+resource "google_compute_forwarding_rule" "default" {
+  name = "test"
+  target = "${google_compute_target_pool.default.self_link}"
+  port_range = "80"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `description` - (Optional) Textual description field.
+
+* `ip_address` - (Optional) The static IP. (if not set, an ephemeral IP is
+used).
+ +* `ip_protocol` - (Optional) The IP protocol to route, one of "TCP" "UDP" "AH" "ESP" or "SCTP". (default "TCP"). + +* `name` - (Required) A unique name for the resource, required by GCE. Changing + this forces a new resource to be created. + +* `port_range` - (Optional) A range e.g. "1024-2048" or a single port "1024" +(defaults to all ports!). + +* `target` - URL of target pool. + +## Attributes Reference + +The following attributes are exported: + +* `self_link` - The URL of the created resource. + +* `ip_address` - The IP address that was chosen (or specified). + + diff --git a/website/source/docs/providers/google/r/compute_http_health_check.html.markdown b/website/source/docs/providers/google/r/compute_http_health_check.html.markdown new file mode 100644 index 000000000..4a4cd3481 --- /dev/null +++ b/website/source/docs/providers/google/r/compute_http_health_check.html.markdown @@ -0,0 +1,57 @@ +--- +layout: "google" +page_title: "Google: google_compute_http_health_check" +sidebar_current: "docs-google-resource-http_health_check" +description: |- + Manages an HTTP Health Check within GCE. +--- + +# google\_compute\_http\_health\_check + +Manages an HTTP health check within GCE. This is used to monitor instances +behind load balancers. Timeouts or HTTP errors cause the instance to be +removed from the pool. For more information, see [the official +documentation](https://cloud.google.com/compute/docs/load-balancing/health-checks) +and +[API](https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks). + +## Example Usage + +``` +resource "google_compute_http_health_check" "default" { + name = "test" + request_path = "/health_check" + check_interval_sec = 1 + timeout_sec = 1 +} +``` + +## Argument Reference + +The following arguments are supported: + +* `check_interval_sec` - (Optional) How often to poll each instance (default 5). + +* `description` - (Optional) Textual description field. 
+ +* `healthy_threshold` - (Optional) Consecutive successes required (default 2). + +* `host` - (Optional) HTTP host header field (default instance's public ip). + +* `name` - (Required) A unique name for the resource, required by GCE. + Changing this forces a new resource to be created. + +* `port` - (Optional) TCP port to connect to (default 80). + +* `request_path` - (Optional) URL path to query (default /). + +* `timeout_sec` - (Optional) How long before declaring failure (default 5). + +* `unhealthy_threshold` - (Optional) Consecutive failures required (default 2). + + +## Attributes Reference + +The following attributes are exported: + +* `self_link` - The URL of the created resource. diff --git a/website/source/docs/providers/google/r/compute_instance.html.markdown b/website/source/docs/providers/google/r/compute_instance.html.markdown index a9e6a6687..b2ea6baf7 100644 --- a/website/source/docs/providers/google/r/compute_instance.html.markdown +++ b/website/source/docs/providers/google/r/compute_instance.html.markdown @@ -8,7 +8,11 @@ description: |- # google\_compute\_instance -Manages a VM instance resource within GCE. +Manages a VM instance resource within GCE. For more information see +[the official documentation](https://cloud.google.com/compute/docs/instances) +and +[API](https://cloud.google.com/compute/docs/reference/latest/instances). + ## Example Usage diff --git a/website/source/docs/providers/google/r/compute_target_pool.html.markdown b/website/source/docs/providers/google/r/compute_target_pool.html.markdown new file mode 100644 index 000000000..1efc5905e --- /dev/null +++ b/website/source/docs/providers/google/r/compute_target_pool.html.markdown @@ -0,0 +1,58 @@ +--- +layout: "google" +page_title: "Google: google_compute_target_pool" +sidebar_current: "docs-google-resource-target_pool" +description: |- + Manages a Target Pool within GCE. +--- + +# google\_compute\_target\_pool + +Manages a Target Pool within GCE. 
This is a collection of instances used as +target of a network load balancer (Forwarding Rule). For more information see +[the official +documentation](https://cloud.google.com/compute/docs/load-balancing/network/target-pools) +and [API](https://cloud.google.com/compute/docs/reference/latest/targetPools). + + +## Example Usage + +``` +resource "google_compute_target_pool" "default" { + name = "test" + instances = [ "us-central1-a/myinstance1", "us-central1-b/myinstance2" ] + health_checks = [ "${google_compute_http_health_check.default.name}" ] +} +``` + +## Argument Reference + +The following arguments are supported: + +* `backup_pool` - (Optional) URL to the backup target pool. Must also set + failover\_ratio. + +* `description` - (Optional) Textual description field. + +* `failover_ratio` - (Optional) Ratio (0 to 1) of failed nodes before using the + backup pool (which must also be set). + +* `health_checks` - (Optional) List of zero or one healthcheck names. + +* `instances` - (Optional) List of instances in the pool. They can be given as + URLs, or in the form of "zone/name". Note that the instances need not exist + at the time of target pool creation, so there is no need to use the Terraform + interpolators to create a dependency on the instances from the target pool. + +* `name` - (Required) A unique name for the resource, required by GCE. Changing + this forces a new resource to be created. + +* `session_affinity` - (Optional) How to distribute load. Options are "NONE" (no affinity). "CLIENT\_IP" (hash of the source/dest addresses / ports), and "CLIENT\_IP\_PROTO" also includes the protocol (default "NONE"). + + +## Attributes Reference + +The following attributes are exported: + +* `self_link` - The URL of the created resource. 
+
From 863fd0c7d29f9cd471023665e5deb55da5d1847e Mon Sep 17 00:00:00 2001
From: Phil Frost
Date: Fri, 30 Jan 2015 13:17:16 -0500
Subject: [PATCH 03/32] Avoid unnecessary updating of aws_subnet

If map_public_ip_on_launch was not specified, AWS picks a default of
"0", which is different than the "" in the state file, triggering an
update each time. Mark that parameter as Computed, avoiding the update.
---
 builtin/providers/aws/resource_aws_subnet.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/builtin/providers/aws/resource_aws_subnet.go b/builtin/providers/aws/resource_aws_subnet.go
index 7bb88f58f..4e11785e7 100644
--- a/builtin/providers/aws/resource_aws_subnet.go
+++ b/builtin/providers/aws/resource_aws_subnet.go
@@ -41,6 +41,7 @@
 			"map_public_ip_on_launch": &schema.Schema{
 				Type:     schema.TypeBool,
 				Optional: true,
+				Computed: true,
 			},
 
 			"tags": tagsSchema(),

From a764adbf1b24a6a61b783a2aa8b8ac00f2b8cabd Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Fri, 30 Jan 2015 15:43:47 -0600
Subject: [PATCH 04/32] Makefile: new deps strategy fixes deps in branches

Currently when running `make updatedeps` from a branch, the dependency
list from master ends up getting used. We tried to work around this in
35490f7812395ff46fcba52989b3fbc3e44215c3, and got part way there, but
here's what was happening:

 - record the current SHA
 - run `go get -f -u -v ./...` which ends up checking out master
 - master is checked out early in the `go get` process, which means all
   subsequent dependencies are resolved from master
 - re-checkout the recorded SHA
 - run tests

This works in most cases, except when the branch being tested actually
changes the list of dependencies in some way.

Here we move away from letting `go get -v` walk through everything in
`./...`, instead building our own list of dependencies with the help of
`deplist`. We can then filter terraform packages out from the list, so
they don't get touched, and safely update the rest.
This should solve problems like those observed in #899 and #900. __Note__: had to add a feature to deplist to make this work properly; see https://github.com/phinze/deplist/commit/016ef97111533b1b8500b1d0db55133e7dba495a Working on getting it accepted upstream. --- Makefile | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index fde469c9c..649fd1b32 100644 --- a/Makefile +++ b/Makefile @@ -32,14 +32,15 @@ testrace: generate # updatedeps installs all the dependencies that Terraform needs to run # and build. updatedeps: - $(eval REF := $(shell sh -c "\ - git symbolic-ref --short HEAD 2>/dev/null \ - || git rev-parse HEAD")) + go get -u github.com/phinze/deplist go get -u github.com/mitchellh/gox go get -u golang.org/x/tools/cmd/stringer go get -u golang.org/x/tools/cmd/vet - go get -f -u -v ./... - git checkout $(REF) + go list github.com/hashicorp/terraform/... \ + | xargs -n 1 deplist -s \ + | grep -v github.com/hashicorp/terraform \ + | sort -u \ + | xargs go get -f -u -v # vet runs the Go source code static analysis tool `vet` to find # any common errors. 
From f561e2a6a87aa4640929334c4a91e011bbd57f90 Mon Sep 17 00:00:00 2001 From: Ferran Rodenas Date: Sat, 10 Jan 2015 19:01:32 -0800 Subject: [PATCH 05/32] Add Azure provider --- builtin/bins/provider-azure/main.go | 12 + builtin/bins/provider-azure/main_test.go | 1 + builtin/providers/azure/config.go | 30 +++ builtin/providers/azure/provider.go | 48 ++++ builtin/providers/azure/provider_test.go | 35 +++ .../azure/resource_virtual_machine.go | 241 ++++++++++++++++++ .../azure/resource_virtual_machine_test.go | 180 +++++++++++++ website/source/assets/stylesheets/_docs.scss | 1 + .../docs/providers/azure/index.html.markdown | 37 +++ .../azure/r/virtual_machine.html.markdown | 71 ++++++ website/source/layouts/azure.erb | 26 ++ website/source/layouts/docs.erb | 4 + 12 files changed, 686 insertions(+) create mode 100644 builtin/bins/provider-azure/main.go create mode 100644 builtin/bins/provider-azure/main_test.go create mode 100644 builtin/providers/azure/config.go create mode 100644 builtin/providers/azure/provider.go create mode 100644 builtin/providers/azure/provider_test.go create mode 100644 builtin/providers/azure/resource_virtual_machine.go create mode 100644 builtin/providers/azure/resource_virtual_machine_test.go create mode 100644 website/source/docs/providers/azure/index.html.markdown create mode 100644 website/source/docs/providers/azure/r/virtual_machine.html.markdown create mode 100644 website/source/layouts/azure.erb diff --git a/builtin/bins/provider-azure/main.go b/builtin/bins/provider-azure/main.go new file mode 100644 index 000000000..45af21656 --- /dev/null +++ b/builtin/bins/provider-azure/main.go @@ -0,0 +1,12 @@ +package main + +import ( + "github.com/hashicorp/terraform/builtin/providers/azure" + "github.com/hashicorp/terraform/plugin" +) + +func main() { + plugin.Serve(&plugin.ServeOpts{ + ProviderFunc: azure.Provider, + }) +} diff --git a/builtin/bins/provider-azure/main_test.go b/builtin/bins/provider-azure/main_test.go new file mode 100644 
index 000000000..06ab7d0f9 --- /dev/null +++ b/builtin/bins/provider-azure/main_test.go @@ -0,0 +1 @@ +package main diff --git a/builtin/providers/azure/config.go b/builtin/providers/azure/config.go new file mode 100644 index 000000000..4f093d591 --- /dev/null +++ b/builtin/providers/azure/config.go @@ -0,0 +1,30 @@ +package azure + +import ( + "fmt" + "log" + "os" + + azure "github.com/MSOpenTech/azure-sdk-for-go" +) + +type Config struct { + PublishSettingsFile string +} + +func (c *Config) loadAndValidate() error { + if _, err := os.Stat(c.PublishSettingsFile); os.IsNotExist(err) { + return fmt.Errorf( + "Error loading Azure Publish Settings file '%s': %s", + c.PublishSettingsFile, + err) + } + + log.Printf("[INFO] Importing Azure Publish Settings file...") + err := azure.ImportPublishSettingsFile(c.PublishSettingsFile) + if err != nil { + return err + } + + return nil +} diff --git a/builtin/providers/azure/provider.go b/builtin/providers/azure/provider.go new file mode 100644 index 000000000..199491e37 --- /dev/null +++ b/builtin/providers/azure/provider.go @@ -0,0 +1,48 @@ +package azure + +import ( + "os" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +func Provider() terraform.ResourceProvider { + return &schema.Provider{ + Schema: map[string]*schema.Schema{ + "publish_settings_file": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: envDefaultFunc("AZURE_PUBLISH_SETTINGS_FILE"), + }, + }, + + ResourcesMap: map[string]*schema.Resource{ + "azure_virtual_machine": resourceVirtualMachine(), + }, + + ConfigureFunc: providerConfigure, + } +} + +func envDefaultFunc(k string) schema.SchemaDefaultFunc { + return func() (interface{}, error) { + if v := os.Getenv(k); v != "" { + return v, nil + } + + return nil, nil + } +} + +func providerConfigure(d *schema.ResourceData) (interface{}, error) { + config := Config{ + PublishSettingsFile: d.Get("publish_settings_file").(string), + } + + if 
err := config.loadAndValidate(); err != nil { + return nil, err + } + + return &config, nil +} diff --git a/builtin/providers/azure/provider_test.go b/builtin/providers/azure/provider_test.go new file mode 100644 index 000000000..4a40c5301 --- /dev/null +++ b/builtin/providers/azure/provider_test.go @@ -0,0 +1,35 @@ +package azure + +import ( + "os" + "testing" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +var testAccProviders map[string]terraform.ResourceProvider +var testAccProvider *schema.Provider + +func init() { + testAccProvider = Provider().(*schema.Provider) + testAccProviders = map[string]terraform.ResourceProvider{ + "azure": testAccProvider, + } +} + +func TestProvider(t *testing.T) { + if err := Provider().(*schema.Provider).InternalValidate(); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestProvider_impl(t *testing.T) { + var _ terraform.ResourceProvider = Provider() +} + +func testAccPreCheck(t *testing.T) { + if v := os.Getenv("AZURE_PUBLISH_SETTINGS_FILE"); v == "" { + t.Fatal("AZURE_PUBLISH_SETTINGS_FILE must be set for acceptance tests") + } +} diff --git a/builtin/providers/azure/resource_virtual_machine.go b/builtin/providers/azure/resource_virtual_machine.go new file mode 100644 index 000000000..88dd9f9fb --- /dev/null +++ b/builtin/providers/azure/resource_virtual_machine.go @@ -0,0 +1,241 @@ +package azure + +import ( + "bytes" + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" + "github.com/MSOpenTech/azure-sdk-for-go/clients/vmClient" +) + +func resourceVirtualMachine() *schema.Resource { + return &schema.Resource{ + Create: resourceVirtualMachineCreate, + Read: resourceVirtualMachineRead, + Delete: resourceVirtualMachineDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "location": &schema.Schema{ + Type: 
schema.TypeString, + Required: true, + ForceNew: true, + }, + + "image": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "size": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "username": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "password": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + ForceNew: true, + }, + + "ssh_public_key_file": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + ForceNew: true, + }, + + "ssh_port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 22, + ForceNew: true, + }, + + "endpoint": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Computed: true, + ForceNew: true, // This can be updatable once we support updates on the resource + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + + "local_port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: resourceVirtualMachineEndpointHash, + }, + + "url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "vip_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Creating Azure Virtual Machine Configuration...") + vmConfig, err := vmClient.CreateAzureVMConfiguration( + d.Get("name").(string), + d.Get("size").(string), + d.Get("image").(string), + d.Get("location").(string)) + if err != nil { + return fmt.Errorf("Error creating Azure virtual machine configuration: %s", err) 
+ } + + // Only Linux VMs are supported. If we want to support other VM types, we need to + // grab the image details and based on the OS add the corresponding configuration. + log.Printf("[DEBUG] Adding Azure Linux Provisioning Configuration...") + vmConfig, err = vmClient.AddAzureLinuxProvisioningConfig( + vmConfig, + d.Get("username").(string), + d.Get("password").(string), + d.Get("ssh_public_key_file").(string), + d.Get("ssh_port").(int)) + if err != nil { + return fmt.Errorf("Error adding Azure linux provisioning configuration: %s", err) + } + + if v := d.Get("endpoint").(*schema.Set); v.Len() > 0 { + log.Printf("[DEBUG] Adding Endpoints to the Azure Virtual Machine...") + endpoints := make([]vmClient.InputEndpoint, v.Len()) + for i, v := range v.List() { + m := v.(map[string]interface{}) + endpoint := vmClient.InputEndpoint{} + endpoint.Name = m["name"].(string) + endpoint.Protocol = m["protocol"].(string) + endpoint.Port = m["port"].(int) + endpoint.LocalPort = m["local_port"].(int) + endpoints[i] = endpoint + } + + configSets := vmConfig.ConfigurationSets.ConfigurationSet + if len(configSets) == 0 { + return fmt.Errorf("Azure virtual machine does not have configuration sets") + } + for i := 0; i < len(configSets); i++ { + if configSets[i].ConfigurationSetType != "NetworkConfiguration" { + continue + } + configSets[i].InputEndpoints.InputEndpoint = + append(configSets[i].InputEndpoints.InputEndpoint, endpoints...) 
+ } + } + + log.Printf("[DEBUG] Creating Azure Virtual Machine...") + err = vmClient.CreateAzureVM( + vmConfig, + d.Get("name").(string), + d.Get("location").(string)) + if err != nil { + return fmt.Errorf("Error creating Azure virtual machine: %s", err) + } + + d.SetId(d.Get("name").(string)) + + return resourceVirtualMachineRead(d, meta) +} + +func resourceVirtualMachineRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Getting Azure Virtual Machine Deployment: %s", d.Id()) + VMDeployment, err := vmClient.GetVMDeployment(d.Id(), d.Id()) + if err != nil { + return fmt.Errorf("Error getting Azure virtual machine deployment: %s", err) + } + + d.Set("url", VMDeployment.Url) + + roleInstances := VMDeployment.RoleInstanceList.RoleInstance + if len(roleInstances) == 0 { + return fmt.Errorf("Virtual Machine does not have IP addresses") + } + ipAddress := roleInstances[0].IpAddress + d.Set("ip_address", ipAddress) + + vips := VMDeployment.VirtualIPs.VirtualIP + if len(vips) == 0 { + return fmt.Errorf("Virtual Machine does not have VIP addresses") + } + vip := vips[0].Address + d.Set("vip_address", vip) + + d.SetConnInfo(map[string]string{ + "type": "ssh", + "host": vip, + "user": d.Get("username").(string), + }) + + return nil +} + +func resourceVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Deleting Azure Virtual Machine Deployment: %s", d.Id()) + if err := vmClient.DeleteVMDeployment(d.Id(), d.Id()); err != nil { + return fmt.Errorf("Error deleting Azure virtual machine deployment: %s", err) + } + + log.Printf("[DEBUG] Deleting Azure Hosted Service: %s", d.Id()) + if err := vmClient.DeleteHostedService(d.Id()); err != nil { + return fmt.Errorf("Error deleting Azure hosted service: %s", err) + } + + d.SetId("") + + return nil +} + +func resourceVirtualMachineEndpointHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", 
m["name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["protocol"].(string))) + buf.WriteString(fmt.Sprintf("%d-", m["port"].(int))) + buf.WriteString(fmt.Sprintf("%d-", m["local_port"].(int))) + + return hashcode.String(buf.String()) +} diff --git a/builtin/providers/azure/resource_virtual_machine_test.go b/builtin/providers/azure/resource_virtual_machine_test.go new file mode 100644 index 000000000..c519383d2 --- /dev/null +++ b/builtin/providers/azure/resource_virtual_machine_test.go @@ -0,0 +1,180 @@ +package azure + +import ( + "fmt" + "math/rand" + "testing" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/MSOpenTech/azure-sdk-for-go/clients/vmClient" +) + +func TestAccAzureVirtualMachine_Basic(t *testing.T) { + var VMDeployment vmClient.VMDeployment + + // The VM name can only be used once globally within azure, + // so we need to generate a random one + rand.Seed(time.Now().UnixNano()) + vmName := fmt.Sprintf("tf-test-vm-%d", rand.Int31()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAzureVirtualMachineDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckAzureVirtualMachineConfig_basic(vmName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAzureVirtualMachineExists("azure_virtual_machine.foobar", &VMDeployment), + testAccCheckAzureVirtualMachineAttributes(&VMDeployment, vmName), + resource.TestCheckResourceAttr( + "azure_virtual_machine.foobar", "name", vmName), + resource.TestCheckResourceAttr( + "azure_virtual_machine.foobar", "location", "West US"), + resource.TestCheckResourceAttr( + "azure_virtual_machine.foobar", "image", "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140724-en-us-30GB"), + resource.TestCheckResourceAttr( + "azure_virtual_machine.foobar", "size", "Basic_A1"), + resource.TestCheckResourceAttr( + 
"azure_virtual_machine.foobar", "username", "foobar"), + ), + }, + }, + }) +} + +func TestAccAzureVirtualMachine_Endpoints(t *testing.T) { + var VMDeployment vmClient.VMDeployment + + // The VM name can only be used once globally within azure, + // so we need to generate a random one + rand.Seed(time.Now().UnixNano()) + vmName := fmt.Sprintf("tf-test-vm-%d", rand.Int31()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAzureVirtualMachineDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckAzureVirtualMachineConfig_endpoints(vmName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAzureVirtualMachineExists("azure_virtual_machine.foobar", &VMDeployment), + testAccCheckAzureVirtualMachineAttributes(&VMDeployment, vmName), + testAccCheckAzureVirtualMachineEndpoint(&VMDeployment, "tcp", 80), + ), + }, + }, + }) +} + +func testAccCheckAzureVirtualMachineDestroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "azure_virtual_machine" { + continue + } + + _, err := vmClient.GetVMDeployment(rs.Primary.ID, rs.Primary.ID) + if err == nil { + return fmt.Errorf("Azure Virtual Machine (%s) still exists", rs.Primary.ID) + } + } + + return nil +} + +func testAccCheckAzureVirtualMachineExists(n string, VMDeployment *vmClient.VMDeployment) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Azure Virtual Machine ID is set") + } + + retrieveVMDeployment, err := vmClient.GetVMDeployment(rs.Primary.ID, rs.Primary.ID) + if err != nil { + return err + } + + if retrieveVMDeployment.Name != rs.Primary.ID { + return fmt.Errorf("Azure Virtual Machine not found %s %s", VMDeployment.Name, rs.Primary.ID) + } + + *VMDeployment = *retrieveVMDeployment + + return nil 
+	}
+}
+
+func testAccCheckAzureVirtualMachineAttributes(VMDeployment *vmClient.VMDeployment, vmName string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		if VMDeployment.Name != vmName {
+			return fmt.Errorf("Bad name: %s != %s", VMDeployment.Name, vmName)
+		}
+
+		return nil
+	}
+}
+
+func testAccCheckAzureVirtualMachineEndpoint(VMDeployment *vmClient.VMDeployment, protocol string, publicPort int) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		roleInstances := VMDeployment.RoleInstanceList.RoleInstance
+		if len(roleInstances) == 0 {
+			return fmt.Errorf("Azure virtual machine does not have role instances")
+		}
+
+		for i := 0; i < len(roleInstances); i++ {
+			instanceEndpoints := roleInstances[i].InstanceEndpoints.InstanceEndpoint
+			if len(instanceEndpoints) == 0 {
+				return fmt.Errorf("Azure virtual machine does not have endpoints")
+			}
+			endpointFound := 0
+			for j := 0; j < len(instanceEndpoints); j++ {
+				if instanceEndpoints[j].Protocol == protocol && instanceEndpoints[j].PublicPort == publicPort {
+					endpointFound = 1
+					break
+				}
+			}
+			if endpointFound == 0 {
+				return fmt.Errorf("Azure virtual machine does not have endpoint %s/%d", protocol, publicPort)
+			}
+		}
+
+		return nil
+	}
+}
+
+func testAccCheckAzureVirtualMachineConfig_basic(vmName string) string {
+	return fmt.Sprintf(`
+resource "azure_virtual_machine" "foobar" {
+    name = "%s"
+    location = "West US"
+    image = "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140724-en-us-30GB"
+    size = "Basic_A1"
+    username = "foobar"
+}
+`, vmName)
+}
+
+func testAccCheckAzureVirtualMachineConfig_endpoints(vmName string) string {
+	return fmt.Sprintf(`
+resource "azure_virtual_machine" "foobar" {
+    name = "%s"
+    location = "West US"
+    image = "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140724-en-us-30GB"
+    size = "Basic_A1"
+    username = "foobar"
+    endpoint {
+        name = "http"
+        protocol = "tcp"
+        port = 80
+        local_port = 80
+    }
+}
+`,
vmName) +} diff --git a/website/source/assets/stylesheets/_docs.scss b/website/source/assets/stylesheets/_docs.scss index a0d2ce807..cb1686a6e 100755 --- a/website/source/assets/stylesheets/_docs.scss +++ b/website/source/assets/stylesheets/_docs.scss @@ -16,6 +16,7 @@ body.layout-heroku, body.layout-mailgun, body.layout-digitalocean, body.layout-aws, +body.layout-azure, body.layout-docs, body.layout-inner, body.layout-downloads, diff --git a/website/source/docs/providers/azure/index.html.markdown b/website/source/docs/providers/azure/index.html.markdown new file mode 100644 index 000000000..4991ae632 --- /dev/null +++ b/website/source/docs/providers/azure/index.html.markdown @@ -0,0 +1,37 @@ +--- +layout: "azure" +page_title: "Provider: Microsoft Azure" +sidebar_current: "docs-azure-index" +description: |- + The Azure provider is used to interact with Microsoft Azure services. The provider needs to be configured with the proper credentials before it can be used. +--- + +# Azure Provider + +The Azure provider is used to interact with +[Microsoft Azure](http://azure.microsoft.com/). The provider needs +to be configured with the proper credentials before it can be used. + +Use the navigation to the left to read about the available resources. + +## Example Usage + +``` +# Configure the Azure provider +provider "azure" { + publish_settings_file = "account.publishsettings" +} + +# Create a new instance +resource "azure_virtual_machine" "default" { + ... +} +``` + +## Argument Reference + +The following keys can be used to configure the provider. + +* `publish_settings_file` - (Required) Path to the JSON file used to describe + your account settings, downloaded from Microsoft Azure. It must be provided, + but it can also be sourced from the AZURE_PUBLISH_SETTINGS_FILE environment variable. 
diff --git a/website/source/docs/providers/azure/r/virtual_machine.html.markdown b/website/source/docs/providers/azure/r/virtual_machine.html.markdown new file mode 100644 index 000000000..946f3b11d --- /dev/null +++ b/website/source/docs/providers/azure/r/virtual_machine.html.markdown @@ -0,0 +1,71 @@ +--- +layout: "azure" +page_title: "Azure: azure_virtual_machine" +sidebar_current: "docs-azure-resource-virtual-machine" +description: |- + Manages a Virtual Machine resource within Azure. +--- + +# azure\_virtual\_machine + +Manages a Virtual Machine resource within Azure. + +## Example Usage + +``` +resource "azure_virtual_machine" "default" { + name = "test" + location = "West US" + image = "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140724-en-us-30GB" + size = "Basic_A1" + username = "${var.username}" + password = "${var.password}" + ssh_public_key_file = "${var.azure_ssh_public_key_file}" + endpoint { + name = "http" + protocol = "tcp" + port = 80 + local_port = 80 + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) A name for the virtual machine. It must use between 3 and + 24 lowercase letters and numbers and it must be unique within Azure. + +* `location` - (Required) The location that the virtual machine should be created in. + +* `image` - (Required) An image to be used to create the virtual machine. + +* `size` - (Required) Size that you want to use for the virtual machine. + +* `username` - (Required) Name of the account that you will use to administer + the virtual machine. You cannot use root for the user name. + +* `password` - (Optional) Password for the admin account. + +* `ssh_public_key_file` - (Optional) SSH key (PEM format). + +* `ssh_port` - (Optional) SSH port. + +* `endpoint` - (Optional) Can be specified multiple times for each + endpoint rule. Each endpoint block supports fields documented below.
+ +The `endpoint` block supports: + +* `name` - (Required) The name of the endpoint. +* `protocol` - (Required) The protocol. +* `port` - (Required) The public port. +* `local_port` - (Required) The private port. + +## Attributes Reference + +The following attributes are exported: + +* `url` - The URL for the virtual machine deployment. +* `ip_address` - The internal IP address of the virtual machine. +* `vip_address` - The public Virtual IP address of the virtual machine. diff --git a/website/source/layouts/azure.erb b/website/source/layouts/azure.erb new file mode 100644 index 000000000..918a12469 --- /dev/null +++ b/website/source/layouts/azure.erb @@ -0,0 +1,26 @@ +<% wrap_layout :inner do %> + <% content_for :sidebar do %> + + <% end %> + + <%= yield %> +<% end %> diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index c71ac5a2e..8e07b6104 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -112,6 +112,10 @@ AWS + > + Azure + + > CloudFlare From d78309694a7c4579098233721f7e03259d082021 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Fri, 30 Jan 2015 16:46:35 -0600 Subject: [PATCH 06/32] Update CHANGELOG.md --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 810a6c172..b04ae280f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,10 @@ ## 0.3.7 (unreleased) +FEATURES: + + * **New provider: `azure`** - initially just supporting Linux virtual + machines [GH-899] + IMPROVEMENTS: * core: Formalized the syntax of interpolations and documented it From 170ddc4bd48d4ab854ab458e1446180ccdb88095 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Fri, 30 Jan 2015 17:54:31 -0600 Subject: [PATCH 07/32] Update CHANGELOG.md --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b04ae280f..c4903ac1c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,9 @@ FEATURES: IMPROVEMENTS: + * **New resources: 
`google_compute_forwarding_rule`, `google_compute_http_health_check`, + and `google_compute_target_pool`** - Together these provide network-level + load balancing. [GH-588] * core: Formalized the syntax of interpolations and documented it very heavily. * core: Strings in interpolations can now contain further interpolations, From b8fb0c0838e422f91b0a907d947572358e298ab8 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Fri, 30 Jan 2015 18:29:12 -0600 Subject: [PATCH 08/32] Makefile: simplify updatedeps; no need for deplist After discussing with the very gracious @cespare over at https://github.com/cespare/deplist/pull/2 I now understand that we can pull off the same logic with just `go list`. The logic is now simpler and more consistent: * List out all packages in our repo * For each of those packages, list their dependencies * Filter out any dependencies that already live in this repo * Remove duplicates * And fetch the rest. `go get` will work out all transitive dependencies from there --- Makefile | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 649fd1b32..2e49e9d09 100644 --- a/Makefile +++ b/Makefile @@ -32,12 +32,11 @@ testrace: generate # updatedeps installs all the dependencies that Terraform needs to run # and build. updatedeps: - go get -u github.com/phinze/deplist go get -u github.com/mitchellh/gox go get -u golang.org/x/tools/cmd/stringer go get -u golang.org/x/tools/cmd/vet - go list github.com/hashicorp/terraform/... \ - | xargs -n 1 deplist -s \ + go list ./... 
\ + | xargs go list -f '{{join .Deps "\n"}}' \ + | grep -v github.com/hashicorp/terraform \ + | sort -u \ + | xargs go get -f -u -v From 6795d213df19153900efbf15ac57a60cf90f889b Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Sun, 1 Feb 2015 10:05:26 -0600 Subject: [PATCH 09/32] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c4903ac1c..ed79e9cd9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,8 @@ IMPROVEMENTS: * provider/aws: The `aws_db_instance` resource no longer requires both `final_snapshot_identifier` and `skip_final_snapshot`; the presence or absence of the former now implies the latter. [GH-874] + * provider/aws: Avoid unnecessary update of `aws_subnet` when + `map_public_ip_on_launch` is not specified in config. [GH-898] * provider/google: Remove "client secrets file", as it's no longer necessary for API authentication [GH-884]. From 6da9f04c10ffc55c1893a1fad6ebae0a62486b8f Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Sun, 1 Feb 2015 10:35:33 -0600 Subject: [PATCH 10/32] fix build: upstream azure client change looks like https://github.com/MSOpenTech/azure-sdk-for-go/pull/30 changed the API for hosted services, which broke our build.
--- builtin/providers/azure/resource_virtual_machine.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/builtin/providers/azure/resource_virtual_machine.go b/builtin/providers/azure/resource_virtual_machine.go index 88dd9f9fb..05f6e44ed 100644 --- a/builtin/providers/azure/resource_virtual_machine.go +++ b/builtin/providers/azure/resource_virtual_machine.go @@ -5,9 +5,10 @@ import ( "fmt" "log" + "github.com/MSOpenTech/azure-sdk-for-go/clients/hostedServiceClient" + "github.com/MSOpenTech/azure-sdk-for-go/clients/vmClient" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" - "github.com/MSOpenTech/azure-sdk-for-go/clients/vmClient" ) func resourceVirtualMachine() *schema.Resource { @@ -220,7 +221,7 @@ func resourceVirtualMachineDelete(d *schema.ResourceData, meta interface{}) erro } log.Printf("[DEBUG] Deleting Azure Hosted Service: %s", d.Id()) - if err := vmClient.DeleteHostedService(d.Id()); err != nil { + if err := hostedServiceClient.DeleteHostedService(d.Id()); err != nil { return fmt.Errorf("Error deleting Azure hosted service: %s", err) } From 2d9dd254938fc7d3741243bd46ad41b9c6941ab9 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 1 Feb 2015 20:17:56 +0100 Subject: [PATCH 11/32] config/module: fix regression of ignoring forced type if valid URL /cc @ceh - Does this break Windows at all? This regressed with your commit (we didn't have test coverage on it). I added a test now, though. 
--- config/module/detect.go | 2 +- config/module/detect_test.go | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/config/module/detect.go b/config/module/detect.go index f70e69a47..9a56bf82d 100644 --- a/config/module/detect.go +++ b/config/module/detect.go @@ -41,7 +41,7 @@ func Detect(src string, pwd string) (string, error) { u, err := urlParse(getSrc) if err == nil && u.Scheme != "" { // Valid URL - return u.String(), nil + return src, nil } for _, d := range Detectors { diff --git a/config/module/detect_test.go b/config/module/detect_test.go index 69a5a6fef..a81bba12b 100644 --- a/config/module/detect_test.go +++ b/config/module/detect_test.go @@ -31,6 +31,12 @@ func TestDetect(t *testing.T) { "git::https://github.com/hashicorp/foo.git//bar", false, }, + { + "git::https://github.com/hashicorp/consul.git", + "", + "git::https://github.com/hashicorp/consul.git", + false, + }, } for i, tc := range cases { From 4ec0c347948331edb52043983d8f0c2da20ad379 Mon Sep 17 00:00:00 2001 From: David Watson Date: Mon, 2 Feb 2015 09:46:35 +0000 Subject: [PATCH 12/32] Expose SelfLink for GCE instances to allow other resources to reference instances. 
--- builtin/providers/google/resource_compute_instance.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/builtin/providers/google/resource_compute_instance.go b/builtin/providers/google/resource_compute_instance.go index 33664f013..0b05f9238 100644 --- a/builtin/providers/google/resource_compute_instance.go +++ b/builtin/providers/google/resource_compute_instance.go @@ -193,6 +193,11 @@ func resourceComputeInstance() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -441,6 +446,8 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error d.Set("tags_fingerprint", instance.Tags.Fingerprint) } + d.Set("self_link", instance.SelfLink) + return nil } From 5bbfc0d4e2c27aabdc3d9d7c0bbe21411d10c3f9 Mon Sep 17 00:00:00 2001 From: Emil Hessman Date: Mon, 2 Feb 2015 12:19:26 +0100 Subject: [PATCH 13/32] config/module: fix URL file path handling on Windows Only adjust the URL Scheme when parsing drive letter file paths on Windows, don't add a file scheme prefix. FileDetector is responsible for adding the file scheme prefix. 
--- config/module/detect.go | 3 +- config/module/url_helper.go | 110 ++++++++++++++++++------------------ 2 files changed, 56 insertions(+), 57 deletions(-) diff --git a/config/module/detect.go b/config/module/detect.go index 9a56bf82d..84e1a1d79 100644 --- a/config/module/detect.go +++ b/config/module/detect.go @@ -2,7 +2,6 @@ package module import ( "fmt" - "net/url" "path/filepath" ) @@ -67,7 +66,7 @@ func Detect(src string, pwd string) (string, error) { } } if subDir != "" { - u, err := url.Parse(result) + u, err := urlParse(result) if err != nil { return "", fmt.Errorf("Error parsing URL: %s", err) } diff --git a/config/module/url_helper.go b/config/module/url_helper.go index 58c4a8967..b1a8756a2 100644 --- a/config/module/url_helper.go +++ b/config/module/url_helper.go @@ -1,55 +1,55 @@ -package module - -import ( - "fmt" - "net/url" - "path/filepath" - "runtime" -) - -func urlParse(rawURL string) (*url.URL, error) { - if runtime.GOOS == "windows" { - if len(rawURL) > 1 && rawURL[1] == ':' { - // Assume we're dealing with a file path. - rawURL = fmtFileURL(rawURL) - } else { - // Make sure we're using "/" on Windows. URLs are "/"-based. - rawURL = filepath.ToSlash(rawURL) - } - } - u, err := url.Parse(rawURL) - if err != nil { - return nil, err - } - - if runtime.GOOS != "windows" { - return u, err - } - - if u.Scheme != "file" { - return u, err - } - - // Remove leading slash for absolute file paths on Windows. - // For example, url.Parse yields u.Path = "/C:/Users/user" for - // rawurl = "file:///C:/Users/user", which is an incorrect syntax. - if len(u.Path) > 2 && u.Path[0] == '/' && u.Path[2] == ':' { - u.Path = u.Path[1:] - } - - return u, err -} - -func fmtFileURL(path string) string { - if runtime.GOOS == "windows" { - // Make sure we're using "/" on Windows. URLs are "/"-based. - path = filepath.ToSlash(path) - } - - // Make sure that we don't start with "/" since we add that below. 
- if path[0] == '/' { - path = path[1:] - } - - return fmt.Sprintf("file:///%s", path) -} +package module + +import ( + "fmt" + "net/url" + "path/filepath" + "runtime" +) + +func urlParse(rawURL string) (*url.URL, error) { + if runtime.GOOS == "windows" { + // Make sure we're using "/" on Windows. URLs are "/"-based. + rawURL = filepath.ToSlash(rawURL) + } + u, err := url.Parse(rawURL) + if err != nil { + return nil, err + } + + if runtime.GOOS != "windows" { + return u, err + } + + if len(rawURL) > 1 && rawURL[1] == ':' { + // Assume we're dealing with a drive letter file path on Windows. + // We need to adjust the URL Path for drive letter file paths + // because url.Parse("c:/users/user") yields URL Scheme = "c" + // and URL path = "/users/user". + u.Path = fmt.Sprintf("%s:%s", u.Scheme, u.Path) + u.Scheme = "" + } + + // Remove leading slash for absolute file paths on Windows. + // For example, url.Parse yields u.Path = "/C:/Users/user" for + // rawURL = "file:///C:/Users/user", which is an incorrect syntax. + if len(u.Path) > 2 && u.Path[0] == '/' && u.Path[2] == ':' { + u.Path = u.Path[1:] + } + + return u, err +} + +func fmtFileURL(path string) string { + if runtime.GOOS == "windows" { + // Make sure we're using "/" on Windows. URLs are "/"-based. + path = filepath.ToSlash(path) + } + + // Make sure that we don't start with "/" since we add that below. 
+ if path[0] == '/' { + path = path[1:] + } + + return fmt.Sprintf("file:///%s", path) +} From a8db835a44fa077a83b07013779502afd849d41d Mon Sep 17 00:00:00 2001 From: Emil Hessman Date: Mon, 2 Feb 2015 19:49:53 +0100 Subject: [PATCH 14/32] config/module: fix HgGetter test failures on Windows HgGetter tests failed on windows/amd64 using Mercurial version 3.2.4: --- FAIL: TestHgGetter (0.11s) get_hg_test.go:35: err: C:\Program Files\Mercurial\hg.exe exited with 255: abort: file:// URLs can only refer to localhost --- FAIL: TestHgGetter_branch (0.11s) get_hg_test.go:62: err: C:\Program Files\Mercurial\hg.exe exited with 255: abort: file:// URLs can only refer to localhost FAIL FAIL github.com/hashicorp/terraform/config/module 5.615s This commit fixes the failures by adjusting the file:// URL to a form that Mercurial expects. --- config/module/get_hg.go | 36 +++++++++++++++++++++++++++--------- config/module/url_helper.go | 8 ++++++++ 2 files changed, 35 insertions(+), 9 deletions(-) diff --git a/config/module/get_hg.go b/config/module/get_hg.go index a979eacfd..666762080 100644 --- a/config/module/get_hg.go +++ b/config/module/get_hg.go @@ -5,6 +5,7 @@ import ( "net/url" "os" "os/exec" + "runtime" ) // HgGetter is a Getter implementation that will download a module from @@ -16,34 +17,40 @@ func (g *HgGetter) Get(dst string, u *url.URL) error { return fmt.Errorf("hg must be available and on the PATH") } + newURL, err := urlParse(u.String()) + if err != nil { + return err + } + if fixWindowsDrivePath(newURL) { + // See valid file path form on http://www.selenic.com/hg/help/urls + newURL.Path = fmt.Sprintf("/%s", newURL.Path) + } + // Extract some query parameters we use var rev string - q := u.Query() + q := newURL.Query() if len(q) > 0 { rev = q.Get("rev") q.Del("rev") - // Copy the URL - var newU url.URL = *u - u = &newU - u.RawQuery = q.Encode() + newURL.RawQuery = q.Encode() } - _, err := os.Stat(dst) + _, err = os.Stat(dst) if err != nil && !os.IsNotExist(err) { 
return err } if err != nil { - if err := g.clone(dst, u); err != nil { + if err := g.clone(dst, newURL); err != nil { return err } } - if err := g.pull(dst, u); err != nil { + if err := g.pull(dst, newURL); err != nil { return err } - return g.update(dst, u, rev) + return g.update(dst, newURL, rev) } func (g *HgGetter) clone(dst string, u *url.URL) error { @@ -67,3 +74,14 @@ func (g *HgGetter) update(dst string, u *url.URL, rev string) error { cmd.Dir = dst return getRunCommand(cmd) } + +func fixWindowsDrivePath(u *url.URL) bool { + // hg assumes a file:/// prefix for Windows drive letter file paths. + // (e.g. file:///c:/foo/bar) + // If the URL Path does not begin with a '/' character, the resulting URL + // path will have a file:// prefix. (e.g. file://c:/foo/bar) + // See http://www.selenic.com/hg/help/urls and the examples listed in + // http://selenic.com/repo/hg-stable/file/1265a3a71d75/mercurial/util.py#l1936 + return runtime.GOOS == "windows" && u.Scheme == "file" && + len(u.Path) > 1 && u.Path[0] != '/' && u.Path[1] == ':' +} diff --git a/config/module/url_helper.go b/config/module/url_helper.go index b1a8756a2..792761927 100644 --- a/config/module/url_helper.go +++ b/config/module/url_helper.go @@ -5,6 +5,7 @@ import ( "net/url" "path/filepath" "runtime" + "strings" ) func urlParse(rawURL string) (*url.URL, error) { @@ -30,6 +31,13 @@ func urlParse(rawURL string) (*url.URL, error) { u.Scheme = "" } + if len(u.Host) > 1 && u.Host[1] == ':' && strings.HasPrefix(rawURL, "file://") { + // Assume we're dealing with a drive letter file path on Windows + // where the drive letter has been parsed into the URL Host. + u.Path = fmt.Sprintf("%s%s", u.Host, u.Path) + u.Host = "" + } + // Remove leading slash for absolute file paths on Windows. // For example, url.Parse yields u.Path = "/C:/Users/user" for // rawURL = "file:///C:/Users/user", which is an incorrect syntax. 
From bcc85be991a56ff3e9a6565d85f8c0dc6933c0ab Mon Sep 17 00:00:00 2001 From: Patrick Lucas Date: Mon, 2 Feb 2015 12:12:03 -0800 Subject: [PATCH 15/32] Remove 'go get ... vet' from updatedeps target This package attempts to install itself to GOROOT which will fail for non-root users. Most users will have already installed the vet tool via a system package, so it shouldn't be necessary to 'go get' here. Moreover, the 'vet' make target already checks that it is installed before running it, running 'go get' if necessary. This is the output when running 'make updatedeps' as a regular user without this change: ``` $ make updatedeps go get -u github.com/mitchellh/gox go get -u golang.org/x/tools/cmd/stringer go get -u golang.org/x/tools/cmd/vet go install golang.org/x/tools/cmd/vet: open /usr/local/go/pkg/tool/linux_amd64/vet: permission denied make: *** [updatedeps] Error 1 ``` --- Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/Makefile b/Makefile index 2e49e9d09..7ea72d97f 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,6 @@ testrace: generate updatedeps: go get -u github.com/mitchellh/gox go get -u golang.org/x/tools/cmd/stringer - go get -u golang.org/x/tools/cmd/vet go list ./... 
\ | xargs go list -f '{{join .Deps "\n"}}' \ | grep -v github.com/hashicorp/terraform \ From 04ac1ffd0205bc539dba5b19abcfd3358fe3817e Mon Sep 17 00:00:00 2001 From: Greg Osuri Date: Mon, 2 Feb 2015 19:25:54 -0800 Subject: [PATCH 16/32] provider/aws: fix for #915 - aws_elb.health_check attributes does not update during update --- builtin/providers/aws/resource_aws_elb.go | 22 +++++++++ .../providers/aws/resource_aws_elb_test.go | 47 +++++++++++++++++++ 2 files changed, 69 insertions(+) diff --git a/builtin/providers/aws/resource_aws_elb.go b/builtin/providers/aws/resource_aws_elb.go index 15feae278..bffa1e0a2 100644 --- a/builtin/providers/aws/resource_aws_elb.go +++ b/builtin/providers/aws/resource_aws_elb.go @@ -331,6 +331,28 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error { d.SetPartial("cross_zone_load_balancing") } + if d.HasChange("health_check") { + vs := d.Get("health_check").(*schema.Set).List() + if len(vs) > 0 { + check := vs[0].(map[string]interface{}) + configureHealthCheckOpts := elb.ConfigureHealthCheck{ + LoadBalancerName: d.Id(), + Check: elb.HealthCheck{ + HealthyThreshold: int64(check["healthy_threshold"].(int)), + UnhealthyThreshold: int64(check["unhealthy_threshold"].(int)), + Interval: int64(check["interval"].(int)), + Target: check["target"].(string), + Timeout: int64(check["timeout"].(int)), + }, + } + _, err := elbconn.ConfigureHealthCheck(&configureHealthCheckOpts) + if err != nil { + return fmt.Errorf("Failure configuring health check: %s", err) + } + d.SetPartial("health_check") + } + } + d.Partial(false) return resourceAwsElbRead(d, meta) } diff --git a/builtin/providers/aws/resource_aws_elb_test.go b/builtin/providers/aws/resource_aws_elb_test.go index cb5be7291..50563565b 100644 --- a/builtin/providers/aws/resource_aws_elb_test.go +++ b/builtin/providers/aws/resource_aws_elb_test.go @@ -152,6 +152,31 @@ func TestAccAWSELB_HealthCheck(t *testing.T) { }, }) } + +func TestAccAWSELBUpdate_HealthCheck(t 
*testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSELBDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSELBConfigHealthCheck, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "aws_elb.bar", "health_check.3484319807.healthy_threshold", "5"), + ), + }, + resource.TestStep{ + Config: testAccAWSELBConfigHealthCheck_update, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "aws_elb.bar", "health_check.2648756019.healthy_threshold", "10"), + ), + }, + }, + }) +} + func testAccCheckAWSELBDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).elbconn @@ -418,3 +443,25 @@ resource "aws_elb" "bar" { } } ` + +const testAccAWSELBConfigHealthCheck_update = ` +resource "aws_elb" "bar" { + name = "foobar-terraform-test" + availability_zones = ["us-west-2a"] + + listener { + instance_port = 8000 + instance_protocol = "http" + lb_port = 80 + lb_protocol = "http" + } + + health_check { + healthy_threshold = 10 + unhealthy_threshold = 5 + target = "HTTP:8000/" + interval = 60 + timeout = 30 + } +} +` From 33aa9d3ee80e79e5c22b2a26bddef1d5a0121a32 Mon Sep 17 00:00:00 2001 From: INADA Naoki Date: Tue, 3 Feb 2015 19:33:01 +0900 Subject: [PATCH 17/32] Fix stringer error on helper/schema/schema.go --- helper/schema/schema.go | 43 ----------------------------- helper/schema/valuetype.go | 46 +++++++++++++++++++++++++++++++ helper/schema/valuetype_string.go | 13 ++++++--- 3 files changed, 55 insertions(+), 47 deletions(-) create mode 100644 helper/schema/valuetype.go diff --git a/helper/schema/schema.go b/helper/schema/schema.go index f16df2d72..fc52548a4 100644 --- a/helper/schema/schema.go +++ b/helper/schema/schema.go @@ -11,8 +11,6 @@ // A good starting point is to view the Provider structure. 
package schema -//go:generate stringer -type=ValueType - import ( "fmt" "os" @@ -25,47 +23,6 @@ import ( "github.com/mitchellh/mapstructure" ) -// ValueType is an enum of the type that can be represented by a schema. -type ValueType int - -const ( - TypeInvalid ValueType = iota - TypeBool - TypeInt - TypeFloat - TypeString - TypeList - TypeMap - TypeSet - typeObject -) - -// Zero returns the zero value for a type. -func (t ValueType) Zero() interface{} { - switch t { - case TypeInvalid: - return nil - case TypeBool: - return false - case TypeInt: - return 0 - case TypeFloat: - return 0.0 - case TypeString: - return "" - case TypeList: - return []interface{}{} - case TypeMap: - return map[string]interface{}{} - case TypeSet: - return nil - case typeObject: - return map[string]interface{}{} - default: - panic(fmt.Sprintf("unknown type %s", t)) - } -} - // Schema is used to describe the structure of a value. // // Read the documentation of the struct elements for important details. diff --git a/helper/schema/valuetype.go b/helper/schema/valuetype.go new file mode 100644 index 000000000..b7b7ac810 --- /dev/null +++ b/helper/schema/valuetype.go @@ -0,0 +1,46 @@ +package schema + +//go:generate stringer -type=ValueType valuetype.go + +import "fmt" + +// ValueType is an enum of the type that can be represented by a schema. +type ValueType int + +const ( + TypeInvalid ValueType = iota + TypeBool + TypeInt + TypeFloat + TypeString + TypeList + TypeMap + TypeSet + typeObject +) + +// Zero returns the zero value for a type. 
+func (t ValueType) Zero() interface{} { + switch t { + case TypeInvalid: + return nil + case TypeBool: + return false + case TypeInt: + return 0 + case TypeFloat: + return 0.0 + case TypeString: + return "" + case TypeList: + return []interface{}{} + case TypeMap: + return map[string]interface{}{} + case TypeSet: + return nil + case typeObject: + return map[string]interface{}{} + default: + panic(fmt.Sprintf("unknown type %s", t)) + } +} diff --git a/helper/schema/valuetype_string.go b/helper/schema/valuetype_string.go index c011d3ec2..484a83963 100644 --- a/helper/schema/valuetype_string.go +++ b/helper/schema/valuetype_string.go @@ -1,4 +1,4 @@ -// generated by stringer -type=ValueType; DO NOT EDIT +// generated by stringer -type=ValueType valuetype.go; DO NOT EDIT package schema @@ -6,11 +6,16 @@ import "fmt" const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject" -var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77} +var _ValueType_index = [...]uint8{11, 19, 26, 35, 45, 53, 60, 67, 77} func (i ValueType) String() string { - if i < 0 || i+1 >= ValueType(len(_ValueType_index)) { + if i < 0 || i >= ValueType(len(_ValueType_index)) { return fmt.Sprintf("ValueType(%d)", i) } - return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]] + hi := _ValueType_index[i] + lo := uint8(0) + if i > 0 { + lo = _ValueType_index[i-1] + } + return _ValueType_name[lo:hi] } From f6367a779abac2c8b66ed10ddfe56afe1d156415 Mon Sep 17 00:00:00 2001 From: INADA Naoki Date: Wed, 4 Feb 2015 01:54:14 +0900 Subject: [PATCH 18/32] regenerate with new stringer. 
--- helper/schema/valuetype_string.go | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/helper/schema/valuetype_string.go b/helper/schema/valuetype_string.go index 484a83963..fec00944e 100644 --- a/helper/schema/valuetype_string.go +++ b/helper/schema/valuetype_string.go @@ -6,16 +6,11 @@ import "fmt" const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject" -var _ValueType_index = [...]uint8{11, 19, 26, 35, 45, 53, 60, 67, 77} +var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77} func (i ValueType) String() string { - if i < 0 || i >= ValueType(len(_ValueType_index)) { + if i < 0 || i+1 >= ValueType(len(_ValueType_index)) { return fmt.Sprintf("ValueType(%d)", i) } - hi := _ValueType_index[i] - lo := uint8(0) - if i > 0 { - lo = _ValueType_index[i-1] - } - return _ValueType_name[lo:hi] + return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]] } From 92335b742a2a2878bd6eea44360582492e662ba8 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 3 Feb 2015 13:11:05 -0600 Subject: [PATCH 19/32] provider/aws: aws_main_route_table_association This resource allows an existing Route Table to be assigned as the "main" Route Table of a VPC. This means that the Route Table will be used for any subnets within the VPC without an explicit Route Table assigned [1]. This is particularly useful in getting an Internet Gateway in place as the default for a VPC, since the automatically created Main Route Table does not have one [2]. Note that this resource is an abstraction over an association and does not map directly to a CRUD-able object in AWS. In order to retain a coherent "Delete" operation for this resource, we remember the ID of the AWS-created Route Table and reset the VPC's main Route Table to it when this resource is deleted. 
refs #843, #748 [1] http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html#RouteTableDetails [2] http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Internet_Gateway.html#Add_IGW_Routing --- builtin/providers/aws/provider.go | 41 ++--- ...source_aws_main_route_table_association.go | 155 ++++++++++++++++++ ...e_aws_main_route_table_association_test.go | 148 +++++++++++++++++ 3 files changed, 324 insertions(+), 20 deletions(-) create mode 100644 builtin/providers/aws/resource_aws_main_route_table_association.go create mode 100644 builtin/providers/aws/resource_aws_main_route_table_association_test.go diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index 3c417d32d..90e43011a 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -45,26 +45,27 @@ func Provider() terraform.ResourceProvider { }, ResourcesMap: map[string]*schema.Resource{ - "aws_autoscaling_group": resourceAwsAutoscalingGroup(), - "aws_db_instance": resourceAwsDbInstance(), - "aws_db_parameter_group": resourceAwsDbParameterGroup(), - "aws_db_security_group": resourceAwsDbSecurityGroup(), - "aws_db_subnet_group": resourceAwsDbSubnetGroup(), - "aws_eip": resourceAwsEip(), - "aws_elb": resourceAwsElb(), - "aws_instance": resourceAwsInstance(), - "aws_internet_gateway": resourceAwsInternetGateway(), - "aws_key_pair": resourceAwsKeyPair(), - "aws_launch_configuration": resourceAwsLaunchConfiguration(), - "aws_network_acl": resourceAwsNetworkAcl(), - "aws_route53_record": resourceAwsRoute53Record(), - "aws_route53_zone": resourceAwsRoute53Zone(), - "aws_route_table": resourceAwsRouteTable(), - "aws_route_table_association": resourceAwsRouteTableAssociation(), - "aws_s3_bucket": resourceAwsS3Bucket(), - "aws_security_group": resourceAwsSecurityGroup(), - "aws_subnet": resourceAwsSubnet(), - "aws_vpc": resourceAwsVpc(), + "aws_autoscaling_group": resourceAwsAutoscalingGroup(), + "aws_db_instance": 
resourceAwsDbInstance(), + "aws_db_parameter_group": resourceAwsDbParameterGroup(), + "aws_db_security_group": resourceAwsDbSecurityGroup(), + "aws_db_subnet_group": resourceAwsDbSubnetGroup(), + "aws_eip": resourceAwsEip(), + "aws_elb": resourceAwsElb(), + "aws_instance": resourceAwsInstance(), + "aws_internet_gateway": resourceAwsInternetGateway(), + "aws_key_pair": resourceAwsKeyPair(), + "aws_launch_configuration": resourceAwsLaunchConfiguration(), + "aws_main_route_table_association": resourceAwsMainRouteTableAssociation(), + "aws_network_acl": resourceAwsNetworkAcl(), + "aws_route53_record": resourceAwsRoute53Record(), + "aws_route53_zone": resourceAwsRoute53Zone(), + "aws_route_table": resourceAwsRouteTable(), + "aws_route_table_association": resourceAwsRouteTableAssociation(), + "aws_s3_bucket": resourceAwsS3Bucket(), + "aws_security_group": resourceAwsSecurityGroup(), + "aws_subnet": resourceAwsSubnet(), + "aws_vpc": resourceAwsVpc(), }, ConfigureFunc: providerConfigure, diff --git a/builtin/providers/aws/resource_aws_main_route_table_association.go b/builtin/providers/aws/resource_aws_main_route_table_association.go new file mode 100644 index 000000000..f656f3760 --- /dev/null +++ b/builtin/providers/aws/resource_aws_main_route_table_association.go @@ -0,0 +1,155 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/mitchellh/goamz/ec2" +) + +func resourceAwsMainRouteTableAssociation() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsMainRouteTableAssociationCreate, + Read: resourceAwsMainRouteTableAssociationRead, + Update: resourceAwsMainRouteTableAssociationUpdate, + Delete: resourceAwsMainRouteTableAssociationDelete, + + Schema: map[string]*schema.Schema{ + "vpc_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "route_table_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + // We use this field to record the main route table that 
is automatically + // created when the VPC is created. We need this to be able to "destroy" + // our main route table association, which we do by returning this route + // table to its original place as the Main Route Table for the VPC. + "original_route_table_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsMainRouteTableAssociationCreate(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn + vpcId := d.Get("vpc_id").(string) + routeTableId := d.Get("route_table_id").(string) + + log.Printf("[INFO] Creating main route table association: %s => %s", vpcId, routeTableId) + + mainAssociation, err := findMainRouteTableAssociation(ec2conn, vpcId) + if err != nil { + return err + } + + resp, err := ec2conn.ReassociateRouteTable( + mainAssociation.AssociationId, + routeTableId, + ) + if err != nil { + return err + } + + d.Set("original_route_table_id", mainAssociation.RouteTableId) + d.SetId(resp.AssociationId) + log.Printf("[INFO] New main route table association ID: %s", d.Id()) + + return nil +} + +func resourceAwsMainRouteTableAssociationRead(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn + + mainAssociation, err := findMainRouteTableAssociation( + ec2conn, + d.Get("vpc_id").(string)) + if err != nil { + return err + } + + if mainAssociation.AssociationId != d.Id() { + // It seems it doesn't exist anymore, so clear the ID + d.SetId("") + } + + return nil +} + +// Update is almost exactly like Create, except we want to retain the +// original_route_table_id - this needs to stay recorded as the AWS-created +// table from VPC creation. 
+func resourceAwsMainRouteTableAssociationUpdate(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn + vpcId := d.Get("vpc_id").(string) + routeTableId := d.Get("route_table_id").(string) + + log.Printf("[INFO] Updating main route table association: %s => %s", vpcId, routeTableId) + + resp, err := ec2conn.ReassociateRouteTable(d.Id(), routeTableId) + if err != nil { + return err + } + + d.SetId(resp.AssociationId) + log.Printf("[INFO] New main route table association ID: %s", d.Id()) + + return nil +} + +func resourceAwsMainRouteTableAssociationDelete(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn + vpcId := d.Get("vpc_id").(string) + originalRouteTableId := d.Get("original_route_table_id").(string) + + log.Printf("[INFO] Deleting main route table association by resetting Main Route Table for VPC: %s to its original Route Table: %s", + vpcId, + originalRouteTableId) + + resp, err := ec2conn.ReassociateRouteTable(d.Id(), originalRouteTableId) + if err != nil { + return err + } + + log.Printf("[INFO] Resulting Association ID: %s", resp.AssociationId) + + return nil +} + +func findMainRouteTableAssociation(ec2conn *ec2.EC2, vpcId string) (*ec2.RouteTableAssociation, error) { + mainRouteTable, err := findMainRouteTable(ec2conn, vpcId) + if err != nil { + return nil, err + } + + for _, a := range mainRouteTable.Associations { + if a.Main { + return &a, nil + } + } + return nil, fmt.Errorf("Could not find main routing table association for VPC: %s", vpcId) +} + +func findMainRouteTable(ec2conn *ec2.EC2, vpcId string) (*ec2.RouteTable, error) { + filter := ec2.NewFilter() + filter.Add("association.main", "true") + filter.Add("vpc-id", vpcId) + routeResp, err := ec2conn.DescribeRouteTables(nil, filter) + if err != nil { + return nil, err + } else if len(routeResp.RouteTables) != 1 { + return nil, fmt.Errorf( + "Expected to find a single main routing table for VPC: %s, but found %d", + vpcId, + 
len(routeResp.RouteTables)) + } + + return &routeResp.RouteTables[0], nil +} diff --git a/builtin/providers/aws/resource_aws_main_route_table_association_test.go b/builtin/providers/aws/resource_aws_main_route_table_association_test.go new file mode 100644 index 000000000..937014cae --- /dev/null +++ b/builtin/providers/aws/resource_aws_main_route_table_association_test.go @@ -0,0 +1,148 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSMainRouteTableAssociation(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckMainRouteTableAssociationDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccMainRouteTableAssociationConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckMainRouteTableAssociation( + "aws_main_route_table_association.foo", + "aws_vpc.foo", + "aws_route_table.foo", + ), + ), + }, + resource.TestStep{ + Config: testAccMainRouteTableAssociationConfigUpdate, + Check: resource.ComposeTestCheckFunc( + testAccCheckMainRouteTableAssociation( + "aws_main_route_table_association.foo", + "aws_vpc.foo", + "aws_route_table.bar", + ), + ), + }, + }, + }) +} + +func testAccCheckMainRouteTableAssociationDestroy(s *terraform.State) error { + if len(s.RootModule().Resources) > 0 { + return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources) + } + + return nil +} + +func testAccCheckMainRouteTableAssociation( + mainRouteTableAssociationResource string, + vpcResource string, + routeTableResource string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[mainRouteTableAssociationResource] + if !ok { + return fmt.Errorf("Not found: %s", mainRouteTableAssociationResource) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is 
set") + } + + vpc, ok := s.RootModule().Resources[vpcResource] + if !ok { + return fmt.Errorf("Not found: %s", vpcResource) + } + + conn := testAccProvider.Meta().(*AWSClient).ec2conn + mainAssociation, err := findMainRouteTableAssociation(conn, vpc.Primary.ID) + if err != nil { + return err + } + + if mainAssociation.AssociationId != rs.Primary.ID { + return fmt.Errorf("Found wrong main association: %s", + mainAssociation.AssociationId) + } + + return nil + } +} + +const testAccMainRouteTableAssociationConfig = ` +resource "aws_vpc" "foo" { + cidr_block = "10.1.0.0/16" +} + +resource "aws_subnet" "foo" { + vpc_id = "${aws_vpc.foo.id}" + cidr_block = "10.1.1.0/24" +} + +resource "aws_internet_gateway" "foo" { + vpc_id = "${aws_vpc.foo.id}" +} + +resource "aws_route_table" "foo" { + vpc_id = "${aws_vpc.foo.id}" + route { + cidr_block = "10.0.0.0/8" + gateway_id = "${aws_internet_gateway.foo.id}" + } +} + +resource "aws_main_route_table_association" "foo" { + vpc_id = "${aws_vpc.foo.id}" + route_table_id = "${aws_route_table.foo.id}" +} +` + +const testAccMainRouteTableAssociationConfigUpdate = ` +resource "aws_vpc" "foo" { + cidr_block = "10.1.0.0/16" +} + +resource "aws_subnet" "foo" { + vpc_id = "${aws_vpc.foo.id}" + cidr_block = "10.1.1.0/24" +} + +resource "aws_internet_gateway" "foo" { + vpc_id = "${aws_vpc.foo.id}" +} + +// Need to keep the old route table around when we update the +// main_route_table_association, otherwise Terraform will try to destroy the +// route table too early, and will fail because it's still the main one +resource "aws_route_table" "foo" { + vpc_id = "${aws_vpc.foo.id}" + route { + cidr_block = "10.0.0.0/8" + gateway_id = "${aws_internet_gateway.foo.id}" + } +} + +resource "aws_route_table" "bar" { + vpc_id = "${aws_vpc.foo.id}" + route { + cidr_block = "10.0.0.0/8" + gateway_id = "${aws_internet_gateway.foo.id}" + } +} + +resource "aws_main_route_table_association" "foo" { + vpc_id = "${aws_vpc.foo.id}" + route_table_id = 
"${aws_route_table.bar.id}" +} +` From f852a01c2206b04cf26444ab6d447b4ad5ded1b3 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 3 Feb 2015 15:09:16 -0600 Subject: [PATCH 20/32] providers/aws: docs for aws_main_route_table_association --- .../r/main_route_table_assoc.html.markdown | 44 +++++++++++++++++++ .../docs/providers/aws/r/vpc.html.markdown | 3 +- website/source/layouts/aws.erb | 4 ++ 3 files changed, 50 insertions(+), 1 deletion(-) create mode 100644 website/source/docs/providers/aws/r/main_route_table_assoc.html.markdown diff --git a/website/source/docs/providers/aws/r/main_route_table_assoc.html.markdown b/website/source/docs/providers/aws/r/main_route_table_assoc.html.markdown new file mode 100644 index 000000000..a89d2ddee --- /dev/null +++ b/website/source/docs/providers/aws/r/main_route_table_assoc.html.markdown @@ -0,0 +1,44 @@ +--- +layout: "aws" +page_title: "AWS: aws_main_route_table_association" +sidebar_current: "docs-aws-resource-main-route-table-assoc" +description: |- + Provides a resource for managing the main routing table of a VPC. +--- + +# aws\_main\_route\_table\_association + +Provides a resource for managing the main routing table of a VPC. + +## Example Usage + +``` +resource "aws_main_route_table_association" "a" { + vpc_id = "${aws_vpc.foo.id}" + route_table_id = "${aws_route_table.bar.id}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `vpc_id` - (Required) The ID of the VPC whose main route table should be set +* `route_table_id` - (Required) The ID of the Route Table to set as the new + main route table for the target VPC + +## Attributes Reference + +The following attributes are exported: + +* `id` - The ID of the Route Table Association +* `original_route_table_id` - Used internally, see __Notes__ below + +## Notes + +On VPC creation, the AWS API always creates an initial Main Route Table. This +resource records the ID of that Route Table under `original_route_table_id`. 
+The "Delete" action for a `main_route_table_association` consists of resetting +this original table as the Main Route Table for the VPC. You'll see this +additional Route Table in the AWS console; it must remain intact in order for +the `main_route_table_association` delete to work properly. diff --git a/website/source/docs/providers/aws/r/vpc.html.markdown b/website/source/docs/providers/aws/r/vpc.html.markdown index f2ab8da16..48e56d340 100644 --- a/website/source/docs/providers/aws/r/vpc.html.markdown +++ b/website/source/docs/providers/aws/r/vpc.html.markdown @@ -53,6 +53,7 @@ The following attributes are exported: * `enable_dns_support` - Whether or not the VPC has DNS support * `enable_dns_hostnames` - Whether or not the VPC has DNS hostname support * `main_route_table_id` - The ID of the main route table associated with - this VPC. + this VPC. Note that you can change a VPC's main route table by using an + [`aws_main_route_table_association`](/docs/providers/aws/r/main_route_table_assoc.html). 
* `default_network_acl_id` - The ID of the network ACL created by default on VPC creation * `default_security_group_id` - The ID of the security group created by default on VPC creation diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index d79f44580..030192dfd 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -53,6 +53,10 @@ aws_launch_configuration + > + aws_main_route_table_association + + > aws_network_acl From a07ff4bacb1ad961b2158474e67cc1f651f82ee1 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Tue, 3 Feb 2015 16:16:41 -0500 Subject: [PATCH 21/32] Remove service_accounts (legacy dupe of service_account) --- .../google/resource_compute_instance.go | 24 ------------------- 1 file changed, 24 deletions(-) diff --git a/builtin/providers/google/resource_compute_instance.go b/builtin/providers/google/resource_compute_instance.go index 33664f013..98e9faf95 100644 --- a/builtin/providers/google/resource_compute_instance.go +++ b/builtin/providers/google/resource_compute_instance.go @@ -109,30 +109,6 @@ func resourceComputeInstance() *schema.Resource { }, }, - "service_accounts": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "email": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "scopes": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "can_ip_forward": &schema.Schema{ Type: schema.TypeBool, Optional: true, From 4908f944908fde7a03c5138330a32feeef0fd00c Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 3 Feb 2015 16:20:03 -0600 Subject: [PATCH 22/32] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ed79e9cd9..636e82ada 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ 
IMPROVEMENTS: `map_public_ip_on_launch` is not specified in config. [GH-898] * provider/google: Remove "client secrets file", as it's no longer necessary for API authentication [GH-884]. + * provider/google: Expose `self_link` on `google_compute_instance` [GH-906] BUG FIXES: From 680fa3c0d8022caa8e4fa37071a93f39dbc521e3 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 3 Feb 2015 16:45:01 -0600 Subject: [PATCH 23/32] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 636e82ada..69c05ee26 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,8 @@ IMPROVEMENTS: * **New resources: `google_compute_forwarding_rule`, `google_compute_http_health_check`, and `google_compute_target_pool`** - Together these provide network-level load balancing. [GH-588] + * **New resource: `aws_main_route_table_association`** - Manage the main routing table + of a VPC. [GH-918] * core: Formalized the syntax of interpolations and documented it very heavily. * core: Strings in interpolations can now contain further interpolations, From 219aa3e7887f47e056a52072a7f886d682510deb Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 3 Feb 2015 19:48:25 -0600 Subject: [PATCH 24/32] helper/schema: fix DiffFieldReader map handling An `InstanceDiff` will include `ResourceAttrDiff` entries for the "length" / `#` field of maps. This makes sense, since for something like `terraform plan` it's useful to see when counts are changing. The `DiffFieldReader` was not taking these entries into account when reading maps out, and was therefore incorrectly returning maps that included an extra `'#'` field, which was causing all sorts of havoc for providers (extra tags on AWS instances, broken google compute instance launch, possibly others). 
* fixes #914 - extra tags on AWS instances * fixes #883 - general core issue sprouted from #757 * removes the hack+TODO from #757 --- .../aws/resource_aws_instance_test.go | 2 + helper/schema/field_reader_diff.go | 5 +++ helper/schema/field_reader_diff_test.go | 45 +++++++++++++++++++ 3 files changed, 52 insertions(+) diff --git a/builtin/providers/aws/resource_aws_instance_test.go b/builtin/providers/aws/resource_aws_instance_test.go index 73da0197f..7275dde92 100644 --- a/builtin/providers/aws/resource_aws_instance_test.go +++ b/builtin/providers/aws/resource_aws_instance_test.go @@ -180,6 +180,8 @@ func TestAccInstance_tags(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists("aws_instance.foo", &v), testAccCheckTags(&v.Tags, "foo", "bar"), + // Guard against regression of https://github.com/hashicorp/terraform/issues/914 + testAccCheckTags(&v.Tags, "#", ""), ), }, diff --git a/helper/schema/field_reader_diff.go b/helper/schema/field_reader_diff.go index aaacd5d68..378909fd3 100644 --- a/helper/schema/field_reader_diff.go +++ b/helper/schema/field_reader_diff.go @@ -82,6 +82,11 @@ func (r *DiffFieldReader) readMap( if !strings.HasPrefix(k, prefix) { continue } + if strings.HasPrefix(k, prefix+"#") { + // Ignore the count field + continue + } + resultSet = true k = k[len(prefix):] diff --git a/helper/schema/field_reader_diff_test.go b/helper/schema/field_reader_diff_test.go index cc07bc013..fbb10fcaf 100644 --- a/helper/schema/field_reader_diff_test.go +++ b/helper/schema/field_reader_diff_test.go @@ -11,6 +11,51 @@ func TestDiffFieldReader_impl(t *testing.T) { var _ FieldReader = new(DiffFieldReader) } +// https://github.com/hashicorp/terraform/issues/914 +func TestDiffFieldReader_MapHandling(t *testing.T) { + schema := map[string]*Schema{ + "tags": &Schema{ + Type: TypeMap, + }, + } + r := &DiffFieldReader{ + Schema: schema, + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "tags.#": 
&terraform.ResourceAttrDiff{ + Old: "1", + New: "2", + }, + "tags.baz": &terraform.ResourceAttrDiff{ + Old: "", + New: "qux", + }, + }, + }, + Source: &MapFieldReader{ + Schema: schema, + Map: BasicMapReader(map[string]string{ + "tags.#": "1", + "tags.foo": "bar", + }), + }, + } + + result, err := r.ReadField([]string{"tags"}) + if err != nil { + t.Fatalf("ReadField failed: %#v", err) + } + + expected := map[string]interface{}{ + "foo": "bar", + "baz": "qux", + } + + if !reflect.DeepEqual(expected, result.Value) { + t.Fatalf("bad: DiffHandling\n\nexpected: %#v\n\ngot: %#v\n\n", expected, result.Value) + } +} + func TestDiffFieldReader_extra(t *testing.T) { schema := map[string]*Schema{ "stringComputed": &Schema{Type: TypeString}, From 4e8e3dad863fc18fc15a56f9f1a2756970048ca0 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 4 Feb 2015 09:25:45 -0600 Subject: [PATCH 25/32] DiffFieldReader: filter all '#' fields from sets Now that readMap filters out '#' fields, when maps are nested in sets, we exposed a related bug where a set was iterating over nested maps and expected the '#' key to be present in those nested maps. By skipping _all_ count fields when iterating over set keys, all is right with the world again. 
--- helper/schema/field_reader_diff.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/helper/schema/field_reader_diff.go b/helper/schema/field_reader_diff.go index 378909fd3..ec875421b 100644 --- a/helper/schema/field_reader_diff.go +++ b/helper/schema/field_reader_diff.go @@ -153,8 +153,8 @@ func (r *DiffFieldReader) readSet( if !strings.HasPrefix(k, prefix) { continue } - if strings.HasPrefix(k, prefix+"#") { - // Ignore the count field + if strings.HasSuffix(k, "#") { + // Ignore any count field continue } From b3850935021459f684db9576de10dae7f6446843 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Fri, 6 Feb 2015 03:21:22 -0500 Subject: [PATCH 26/32] Deprecated 'network', introduce 'network_interface' --- .../google/resource_compute_instance.go | 432 ++++++++++++------ .../google/resource_compute_instance_test.go | 127 ++++- .../google/r/compute_instance.html.markdown | 30 +- 3 files changed, 447 insertions(+), 142 deletions(-) diff --git a/builtin/providers/google/resource_compute_instance.go b/builtin/providers/google/resource_compute_instance.go index 98e9faf95..5093d1b67 100644 --- a/builtin/providers/google/resource_compute_instance.go +++ b/builtin/providers/google/resource_compute_instance.go @@ -75,20 +75,61 @@ func resourceComputeInstance() *schema.Resource { }, }, + "network_interface": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "access_config": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nat_ip": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + }, + }, + }, 
+ }, + }, + }, + "network": &schema.Schema{ Type: schema.TypeList, - Required: true, + Optional: true, ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "source": &schema.Schema{ Type: schema.TypeString, Required: true, + ForceNew: true, }, "address": &schema.Schema{ Type: schema.TypeString, Optional: true, + ForceNew: true, }, "name": &schema.Schema{ @@ -173,6 +214,33 @@ func resourceComputeInstance() *schema.Resource { } } +func resourceOperationWaitZone( + config *Config, op *compute.Operation, zone string, activity string) error { + + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Zone: zone, + Type: OperationWaitZone, + } + state := w.Conf() + state.Delay = 10 * time.Second + state.Timeout = 10 * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s: %s", activity, err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } + return nil +} + + func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -258,32 +326,80 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err disks = append(disks, &disk) } - // Build up the list of networks networksCount := d.Get("network.#").(int) - networks := make([]*compute.NetworkInterface, 0, networksCount) - for i := 0; i < networksCount; i++ { - prefix := fmt.Sprintf("network.%d", i) - // Load up the name of this network - networkName := d.Get(prefix + ".source").(string) - network, err := config.clientCompute.Networks.Get( - config.Project, networkName).Do() - if err != nil { - return fmt.Errorf( - "Error loading network '%s': %s", - networkName, err) - } + networkInterfacesCount := d.Get("network_interface.#").(int) - // Build the disk - var iface compute.NetworkInterface - iface.AccessConfigs = 
[]*compute.AccessConfig{ - &compute.AccessConfig{ - Type: "ONE_TO_ONE_NAT", - NatIP: d.Get(prefix + ".address").(string), - }, - } - iface.Network = network.SelfLink + if networksCount > 0 && networkInterfacesCount > 0 { + return fmt.Errorf("Error: cannot define both networks and network_interfaces.") + } + if networksCount == 0 && networkInterfacesCount == 0 { + return fmt.Errorf("Error: Must define at least one network_interface.") + } - networks = append(networks, &iface) + var networkInterfaces []*compute.NetworkInterface + + if networksCount > 0 { + // TODO: Delete this block when removing network { } + // Build up the list of networkInterfaces + networkInterfaces = make([]*compute.NetworkInterface, 0, networksCount) + for i := 0; i < networksCount; i++ { + prefix := fmt.Sprintf("network.%d", i) + // Load up the name of this network + networkName := d.Get(prefix + ".source").(string) + network, err := config.clientCompute.Networks.Get( + config.Project, networkName).Do() + if err != nil { + return fmt.Errorf( + "Error loading network '%s': %s", + networkName, err) + } + + // Build the networkInterface + var iface compute.NetworkInterface + iface.AccessConfigs = []*compute.AccessConfig{ + &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + NatIP: d.Get(prefix + ".address").(string), + }, + } + iface.Network = network.SelfLink + + networkInterfaces = append(networkInterfaces, &iface) + } + } + + if networkInterfacesCount > 0 { + // Build up the list of networkInterfaces + networkInterfaces = make([]*compute.NetworkInterface, 0, networkInterfacesCount) + for i := 0; i < networkInterfacesCount; i++ { + prefix := fmt.Sprintf("network_interface.%d", i) + // Load up the name of this network_interfac + networkName := d.Get(prefix + ".network").(string) + network, err := config.clientCompute.Networks.Get( + config.Project, networkName).Do() + if err != nil { + return fmt.Errorf( + "Error referencing network '%s': %s", + networkName, err) + } + + // Build the 
networkInterface + var iface compute.NetworkInterface + iface.Network = network.SelfLink + + // Handle access_config structs + accessConfigsCount := d.Get(prefix + ".access_config.#").(int) + iface.AccessConfigs = make([]*compute.AccessConfig, accessConfigsCount) + for j := 0; j < accessConfigsCount; j++ { + acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j) + iface.AccessConfigs[j] = &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + NatIP: d.Get(acPrefix + ".nat_ip").(string), + } + } + + networkInterfaces = append(networkInterfaces, &iface) + } } serviceAccountsCount := d.Get("service_account.#").(int) @@ -314,7 +430,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err MachineType: machineType.SelfLink, Metadata: resourceInstanceMetadata(d), Name: d.Get("name").(string), - NetworkInterfaces: networks, + NetworkInterfaces: networkInterfaces, Tags: resourceInstanceTags(d), ServiceAccounts: serviceAccounts, } @@ -330,28 +446,11 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err d.SetId(instance.Name) // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Zone: zone.Name, - Type: OperationWaitZone, - } - state := w.Conf() - state.Delay = 10 * time.Second - state.Timeout = 10 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for instance to create: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { + waitErr := resourceOperationWaitZone(config, op, zone.Name, "instance to create") + if waitErr != nil { // The resource didn't actually create d.SetId("") - - // Return the error - return OperationError(*op.Error) + return waitErr } return resourceComputeInstanceRead(d, meta) @@ -385,26 +484,85 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error } } + networksCount := 
d.Get("network.#").(int) + networkInterfacesCount := d.Get("network_interface.#").(int) + + if networksCount > 0 && networkInterfacesCount > 0 { + return fmt.Errorf("Error: cannot define both networks and network_interfaces.") + } + if networksCount == 0 && networkInterfacesCount == 0 { + return fmt.Errorf("Error: Must define at least one network_interface.") + } + // Set the networks + // Use the first external IP found for the default connection info. externalIP := "" - for i, iface := range instance.NetworkInterfaces { - prefix := fmt.Sprintf("network.%d", i) - d.Set(prefix+".name", iface.Name) + internalIP := "" + if networksCount > 0 { + // TODO: Remove this when realizing deprecation of .network + for i, iface := range instance.NetworkInterfaces { + prefix := fmt.Sprintf("network.%d", i) + d.Set(prefix+".name", iface.Name) + log.Printf(prefix+".name = %s", iface.Name) - // Use the first external IP found for the default connection info. - natIP := resourceInstanceNatIP(iface) - if externalIP == "" && natIP != "" { - externalIP = natIP + var natIP string + for _, config := range iface.AccessConfigs { + if config.Type == "ONE_TO_ONE_NAT" { + natIP = config.NatIP + break + } + } + + if externalIP == "" && natIP != "" { + externalIP = natIP + } + d.Set(prefix+".external_address", natIP) + + d.Set(prefix+".internal_address", iface.NetworkIP) } - d.Set(prefix+".external_address", natIP) + } - d.Set(prefix+".internal_address", iface.NetworkIP) + if networkInterfacesCount > 0 { + for i, iface := range instance.NetworkInterfaces { + + prefix := fmt.Sprintf("network_interface.%d", i) + d.Set(prefix+".name", iface.Name) + + // The first non-empty ip is left in natIP + var natIP string + for j, config := range iface.AccessConfigs { + acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j) + d.Set(acPrefix+".nat_ip", config.NatIP) + if natIP == "" { + natIP = config.NatIP + } + } + + if externalIP == "" { + externalIP = natIP + } + + d.Set(prefix+".address", 
iface.NetworkIP) + if internalIP == "" { + internalIP = iface.NetworkIP + } + + + } + } + + // Fall back on internal ip if there is no external ip. This makes sense in the situation where + // terraform is being used on a cloud instance and can therefore access the instances it creates + // via their internal ips. + sshIP := externalIP + if sshIP == "" { + sshIP = internalIP } // Initialize the connection info d.SetConnInfo(map[string]string{ "type": "ssh", - "host": externalIP, + "host": sshIP, }) // Set the metadata fingerprint if there is one. @@ -423,6 +581,21 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + zone := d.Get("zone").(string) + + instance, err := config.clientCompute.Instances.Get( + config.Project, zone, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading instance: %s", err) + } + // Enable partial mode for the resource since it is possible d.Partial(true) @@ -430,30 +603,15 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if d.HasChange("metadata") { metadata := resourceInstanceMetadata(d) op, err := config.clientCompute.Instances.SetMetadata( - config.Project, d.Get("zone").(string), d.Id(), metadata).Do() + config.Project, zone, d.Id(), metadata).Do() if err != nil { return fmt.Errorf("Error updating metadata: %s", err) } - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Zone: d.Get("zone").(string), - Type: OperationWaitZone, - } - state := w.Conf() - state.Delay = 1 * time.Second - state.Timeout = 5 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for metadata to update: %s", 
err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + // 1 5 2 + opErr := resourceOperationWaitZone(config, op, zone, "metadata to update") + if opErr != nil { + return opErr } d.SetPartial("metadata") @@ -462,35 +620,80 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if d.HasChange("tags") { tags := resourceInstanceTags(d) op, err := config.clientCompute.Instances.SetTags( - config.Project, d.Get("zone").(string), d.Id(), tags).Do() + config.Project, zone, d.Id(), tags).Do() if err != nil { return fmt.Errorf("Error updating tags: %s", err) } - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Zone: d.Get("zone").(string), - Type: OperationWaitZone, - } - state := w.Conf() - state.Delay = 1 * time.Second - state.Timeout = 5 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for tags to update: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + opErr := resourceOperationWaitZone(config, op, zone, "tags to update") + if opErr != nil { + return opErr } d.SetPartial("tags") } + networkInterfacesCount := d.Get("network_interface.#").(int) + if networkInterfacesCount > 0 { + // Sanity check + if networkInterfacesCount != len(instance.NetworkInterfaces) { + return fmt.Errorf("Instance had unexpected number of network interfaces: %d", len(instance.NetworkInterfaces)) + } + for i := 0; i < networkInterfacesCount; i++ { + prefix := fmt.Sprintf("network_interface.%d", i) + instNetworkInterface := instance.NetworkInterfaces[i] + networkName := d.Get(prefix+".name").(string) + + // TODO: This sanity check is broken by #929, disabled for now (by forcing the equality) + networkName = instNetworkInterface.Name + // Sanity check + if networkName != 
instNetworkInterface.Name { + return fmt.Errorf("Instance networkInterface had unexpected name: %s", instNetworkInterface.Name) + } + + if d.HasChange(prefix+".access_config") { + + // TODO: This code deletes then recreates accessConfigs. This is bad because it may + // leave the machine inaccessible from either ip if the creation part fails (network + // timeout etc). However right now there is a GCE limit of 1 accessConfig so it is + // the only way to do it. In future this should be revised to only change what is + // necessary, and also add before removing. + + // Delete any accessConfig that currently exists in instNetworkInterface + for _, ac := range(instNetworkInterface.AccessConfigs) { + op, err := config.clientCompute.Instances.DeleteAccessConfig( + config.Project, zone, d.Id(), networkName, ac.Name).Do(); + if err != nil { + return fmt.Errorf("Error deleting old access_config: %s", err) + } + opErr := resourceOperationWaitZone(config, op, zone, "old access_config to delete") + if opErr != nil { + return opErr + } + } + + // Create new ones + accessConfigsCount := d.Get(prefix + ".access_config.#").(int) + for j := 0; j < accessConfigsCount; j++ { + acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j) + ac := &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + NatIP: d.Get(acPrefix + ".nat_ip").(string), + } + op, err := config.clientCompute.Instances.AddAccessConfig( + config.Project, zone, d.Id(), networkName, ac).Do(); + if err != nil { + return fmt.Errorf("Error adding new access_config: %s", err) + } + opErr := resourceOperationWaitZone(config, op, zone, "new access_config to add") + if opErr != nil { + return opErr + } + } + } + } + } + // We made it, disable partial mode d.Partial(false) @@ -500,32 +703,16 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - op, err := 
config.clientCompute.Instances.Delete( - config.Project, d.Get("zone").(string), d.Id()).Do() + zone := d.Get("zone").(string) + op, err := config.clientCompute.Instances.Delete(config.Project, zone, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting instance: %s", err) } // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Zone: d.Get("zone").(string), - Type: OperationWaitZone, - } - state := w.Conf() - state.Delay = 5 * time.Second - state.Timeout = 5 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for instance to delete: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + opErr := resourceOperationWaitZone(config, op, zone, "instance to delete") + if opErr != nil { + return opErr } d.SetId("") @@ -577,16 +764,3 @@ func resourceInstanceTags(d *schema.ResourceData) *compute.Tags { return tags } - -// resourceInstanceNatIP acquires the first NatIP with a "ONE_TO_ONE_NAT" type -// in the compute.NetworkInterface's AccessConfigs. 
-func resourceInstanceNatIP(iface *compute.NetworkInterface) (natIP string) { - for _, config := range iface.AccessConfigs { - if config.Type == "ONE_TO_ONE_NAT" { - natIP = config.NatIP - break - } - } - - return natIP -} diff --git a/builtin/providers/google/resource_compute_instance_test.go b/builtin/providers/google/resource_compute_instance_test.go index f765a44c4..226406665 100644 --- a/builtin/providers/google/resource_compute_instance_test.go +++ b/builtin/providers/google/resource_compute_instance_test.go @@ -10,6 +10,28 @@ import ( "github.com/hashicorp/terraform/terraform" ) +func TestAccComputeInstance_basic_deprecated_network(t *testing.T) { + var instance compute.Instance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic_deprecated_network, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTag(&instance, "foo"), + testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true), + ), + }, + }, + }) +} + func TestAccComputeInstance_basic(t *testing.T) { var instance compute.Instance @@ -45,7 +67,7 @@ func TestAccComputeInstance_IP(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceNetwork(&instance), + testAccCheckComputeInstanceAccessConfigHasIP(&instance), ), }, }, @@ -73,6 +95,35 @@ func TestAccComputeInstance_disks(t *testing.T) { }) } +func TestAccComputeInstance_update_deprecated_network(t *testing.T) { + var instance compute.Instance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + 
CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic_deprecated_network, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + ), + }, + resource.TestStep{ + Config: testAccComputeInstance_update_deprecated_network, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMetadata( + &instance, "bar", "baz"), + testAccCheckComputeInstanceTag(&instance, "baz"), + ), + }, + }, + }) +} + func TestAccComputeInstance_update(t *testing.T) { var instance compute.Instance @@ -96,6 +147,7 @@ func TestAccComputeInstance_update(t *testing.T) { testAccCheckComputeInstanceMetadata( &instance, "bar", "baz"), testAccCheckComputeInstanceTag(&instance, "baz"), + testAccCheckComputeInstanceAccessConfig(&instance), ), }, }, @@ -173,7 +225,19 @@ func testAccCheckComputeInstanceMetadata( } } -func testAccCheckComputeInstanceNetwork(instance *compute.Instance) resource.TestCheckFunc { +func testAccCheckComputeInstanceAccessConfig(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + if len(i.AccessConfigs) == 0 { + return fmt.Errorf("no access_config") + } + } + + return nil + } +} + +func testAccCheckComputeInstanceAccessConfigHasIP(instance *compute.Instance) resource.TestCheckFunc { return func(s *terraform.State) error { for _, i := range instance.NetworkInterfaces { for _, c := range i.AccessConfigs { @@ -219,7 +283,7 @@ func testAccCheckComputeInstanceTag(instance *compute.Instance, n string) resour } } -const testAccComputeInstance_basic = ` +const testAccComputeInstance_basic_deprecated_network = ` resource "google_compute_instance" "foobar" { name = "terraform-test" machine_type = "n1-standard-1" @@ -240,7 +304,7 @@ resource 
"google_compute_instance" "foobar" { } }` -const testAccComputeInstance_update = ` +const testAccComputeInstance_update_deprecated_network = ` resource "google_compute_instance" "foobar" { name = "terraform-test" machine_type = "n1-standard-1" @@ -260,6 +324,49 @@ resource "google_compute_instance" "foobar" { } }` +const testAccComputeInstance_basic = ` +resource "google_compute_instance" "foobar" { + name = "terraform-test" + machine_type = "n1-standard-1" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + image = "debian-7-wheezy-v20140814" + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } +}` + +// Update metadata, tags, and network_interface +const testAccComputeInstance_update = ` +resource "google_compute_instance" "foobar" { + name = "terraform-test" + machine_type = "n1-standard-1" + zone = "us-central1-a" + tags = ["baz"] + + disk { + image = "debian-7-wheezy-v20140814" + } + + network_interface { + network = "default" + access_config { } + } + + metadata { + bar = "baz" + } +}` + const testAccComputeInstance_ip = ` resource "google_compute_address" "foo" { name = "foo" @@ -275,9 +382,11 @@ resource "google_compute_instance" "foobar" { image = "debian-7-wheezy-v20140814" } - network { - source = "default" - address = "${google_compute_address.foo.address}" + network_interface { + network = "default" + access_config { + nat_ip = "${google_compute_address.foo.address}" + } } metadata { @@ -307,8 +416,8 @@ resource "google_compute_instance" "foobar" { auto_delete = false } - network { - source = "default" + network_interface { + network = "default" } metadata { diff --git a/website/source/docs/providers/google/r/compute_instance.html.markdown b/website/source/docs/providers/google/r/compute_instance.html.markdown index b2ea6baf7..91eb48102 100644 --- a/website/source/docs/providers/google/r/compute_instance.html.markdown +++ 
b/website/source/docs/providers/google/r/compute_instance.html.markdown @@ -27,8 +27,11 @@ resource "google_compute_instance" "default" { image = "debian-7-wheezy-v20140814" } - network { - source = "default" + network_interface { + network = "default" + access_config { + // Ephemeral IP + } } metadata { @@ -64,7 +67,11 @@ The following arguments are supported: * `metadata` - (Optional) Metadata key/value pairs to make available from within the instance. -* `network` - (Required) Networks to attach to the instance. This can be +* `network_interface` - (Required) Networks to attach to the instance. This can be + specified multiple times for multiple networks. Structure is documented + below. + +* `network` - (DEPRECATED, Required) Networks to attach to the instance. This can be specified multiple times for multiple networks. Structure is documented below. @@ -85,7 +92,22 @@ The `disk` block supports: * `type` - (Optional) The GCE disk type. -The `network` block supports: +The `network_interface` block supports: + +* `network` - (Required) The name of the network to attach this interface to. + +* `access_config` - (Optional) Access configurations, i.e. IPs via which this instance can be + accessed via the Internet. Omit to ensure that the instance is not accessible from the Internet +(this means that ssh provisioners will not work unless you are running Terraform can send traffic to +the instance's network (e.g. via tunnel or because it is running on another cloud instance on that +network). This block can be repeated multiple times. Structure documented below. + +The `access_config` block supports: + +* `nat_ip` - (Optional) The IP address that will be 1:1 mapped to the instance's network ip. If not + given, one will be generated. + +(DEPRECATED) The `network` block supports: * `source` - (Required) The name of the network to attach this interface to. 
From 33eebbed510d4e44e99ded35ba67da64ac194a0d Mon Sep 17 00:00:00 2001 From: Julien Vey Date: Thu, 5 Feb 2015 11:37:52 +0100 Subject: [PATCH 27/32] provider/gce: Add description in firewall resource --- builtin/providers/google/resource_compute_firewall.go | 6 ++++++ builtin/providers/google/resource_compute_firewall_test.go | 2 ++ .../docs/providers/google/r/compute_firewall.html.markdown | 2 ++ 3 files changed, 10 insertions(+) diff --git a/builtin/providers/google/resource_compute_firewall.go b/builtin/providers/google/resource_compute_firewall.go index dfd020cc4..9cbe5b53b 100644 --- a/builtin/providers/google/resource_compute_firewall.go +++ b/builtin/providers/google/resource_compute_firewall.go @@ -26,6 +26,11 @@ func resourceComputeFirewall() *schema.Resource { ForceNew: true, }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "network": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -306,6 +311,7 @@ func resourceFirewall( // Build the firewall parameter return &compute.Firewall{ Name: d.Get("name").(string), + Description: d.Get("description").(string), Network: network.SelfLink, Allowed: allowed, SourceRanges: sourceRanges, diff --git a/builtin/providers/google/resource_compute_firewall_test.go b/builtin/providers/google/resource_compute_firewall_test.go index 58a6fd787..9bb92af20 100644 --- a/builtin/providers/google/resource_compute_firewall_test.go +++ b/builtin/providers/google/resource_compute_firewall_test.go @@ -126,6 +126,7 @@ resource "google_compute_network" "foobar" { resource "google_compute_firewall" "foobar" { name = "terraform-test" + description = "Resource created for Terraform acceptance testing" network = "${google_compute_network.foobar.name}" source_tags = ["foo"] @@ -142,6 +143,7 @@ resource "google_compute_network" "foobar" { resource "google_compute_firewall" "foobar" { name = "terraform-test" + description = "Resource created for Terraform acceptance testing" network = 
"${google_compute_network.foobar.name}" source_tags = ["foo"] diff --git a/website/source/docs/providers/google/r/compute_firewall.html.markdown b/website/source/docs/providers/google/r/compute_firewall.html.markdown index 638a4bfd2..f0ed797db 100644 --- a/website/source/docs/providers/google/r/compute_firewall.html.markdown +++ b/website/source/docs/providers/google/r/compute_firewall.html.markdown @@ -37,6 +37,8 @@ The following arguments are supported: * `name` - (Required) A unique name for the resource, required by GCE. Changing this forces a new resource to be created. +* `description` - (Optional) Textual description field. + * `network` - (Required) The name of the network to attach this firewall to. * `allow` - (Required) Can be specified multiple times for each allow From 481b3c7e3baeb5ecd257a405c6a59687de85be20 Mon Sep 17 00:00:00 2001 From: Greg Osuri Date: Fri, 6 Feb 2015 15:03:22 -0800 Subject: [PATCH 28/32] provider/aws: fix for #915, disabling ForceNew while updating elb healthchecks --- builtin/providers/aws/resource_aws_elb.go | 1 - 1 file changed, 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_elb.go b/builtin/providers/aws/resource_aws_elb.go index bffa1e0a2..b56d47d87 100644 --- a/builtin/providers/aws/resource_aws_elb.go +++ b/builtin/providers/aws/resource_aws_elb.go @@ -119,7 +119,6 @@ func resourceAwsElb() *schema.Resource { "health_check": &schema.Schema{ Type: schema.TypeSet, Optional: true, - ForceNew: true, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ From 61536cb1f8d3c49742967ed1b6003bdc6f21be46 Mon Sep 17 00:00:00 2001 From: Amadeus Demarzi Date: Fri, 6 Feb 2015 15:28:26 -0800 Subject: [PATCH 29/32] Attach Chainable timers to Engine render loop Previous to this commit, the Chainable class which is responsible for iterating on the sequence of events in the intro Terraform animation, was using setTimeout and thus could result in very messed up race conditions when triggering physics changes. 
This commit attaches the .wait timers to the Engine.render loop which should enable everything to stay in sync. --- .../source/assets/javascripts/app/Engine.js | 12 +++++++- .../assets/javascripts/lib/Chainable.js | 30 +++++++++++++++---- 2 files changed, 35 insertions(+), 7 deletions(-) diff --git a/website/source/assets/javascripts/app/Engine.js b/website/source/assets/javascripts/app/Engine.js index a918be3b0..e89d61074 100644 --- a/website/source/assets/javascripts/app/Engine.js +++ b/website/source/assets/javascripts/app/Engine.js @@ -68,7 +68,10 @@ Engine = Base.extend({ this.background.className += ' show'; this.canvas.style.opacity = 1; - new Chainable() + // We have to pass the engine into Chainable to + // enable the timers to properly attach to the + // run/render loop + new Chainable(this) .wait(1000) .then(function(){ this.starGeneratorRate = 200; @@ -202,6 +205,13 @@ Engine = Base.extend({ this.now = Date.now() / 1000; this.tick = Math.min(this.now - this.last, 0.017); + // We must attach the chainable timer to the engine + // run/render loop or else things can get pretty + // out of wack + if (this.updateChainTimer) { + this.updateChainTimer(this.tick); + } + // Update all particles... 
may need to be optimized for (p = 0; p < this.particles.length; p++) { this.particles[p].update(this); diff --git a/website/source/assets/javascripts/lib/Chainable.js b/website/source/assets/javascripts/lib/Chainable.js index edb7f1757..dbe51dd1d 100644 --- a/website/source/assets/javascripts/lib/Chainable.js +++ b/website/source/assets/javascripts/lib/Chainable.js @@ -1,12 +1,29 @@ (function(){ -var Chainable = function(){ +var Chainable = function(engine){ + this.engine = engine; this._chain = []; + this._updateTimer = this._updateTimer.bind(this); this._cycle = this._cycle.bind(this); }; Chainable.prototype._running = false; +Chainable.prototype._updateTimer = function(tick){ + this._timer += tick; + if (this._timer >= this._timerMax) { + this.resetTimer(); + this._cycle(); + } +}; + +Chainable.prototype.resetTimer = function(){ + this.engine.updateChainTimer = undefined; + this._timer = 0; + this._timerMax = 0; + return this; +}; + Chainable.prototype.start = function(){ if (this._running || !this._chain.length) { return this; @@ -19,9 +36,8 @@ Chainable.prototype.reset = function(){ if (!this._running) { return this; } - clearTimeout(this._timer); - this._timer = null; - this._chain.length = 0; + this.resetTimer(); + this._timer = 0; this._running = false; return this; }; @@ -40,8 +56,10 @@ Chainable.prototype._cycle = function(){ return this._cycle(); } if (current.type === 'wait') { - clearTimeout(this._timer); - this._timer = setTimeout(this._cycle, current.time || 0); + this.resetTimer(); + // Convert timer to seconds + this._timerMax = current.time / 1000; + this.engine.updateChainTimer = this._updateTimer; current = null; } From 92540d3d880abb1e5dc9d9b35a9b6bdc55df6f94 Mon Sep 17 00:00:00 2001 From: lalyos Date: Sat, 7 Feb 2015 16:55:59 +0100 Subject: [PATCH 30/32] fixing version numbers RCs should be labeled x.x.x-rcx see conversation with ryanuber: https://github.com/hashicorp/go-checkpoint/issues/2#issuecomment-73199209 --- checkpoint.go | 2 +- 
command/version.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/checkpoint.go b/checkpoint.go index ebe592b32..4837e4763 100644 --- a/checkpoint.go +++ b/checkpoint.go @@ -34,7 +34,7 @@ func runCheckpoint(c *Config) { version := Version if VersionPrerelease != "" { - version += fmt.Sprintf(".%s", VersionPrerelease) + version += fmt.Sprintf("-%s", VersionPrerelease) } signaturePath := filepath.Join(configDir, "checkpoint_signature") diff --git a/command/version.go b/command/version.go index 729f55aa7..d4c3a2f89 100644 --- a/command/version.go +++ b/command/version.go @@ -38,7 +38,7 @@ func (c *VersionCommand) Run(args []string) int { fmt.Fprintf(&versionString, "Terraform v%s", c.Version) if c.VersionPrerelease != "" { - fmt.Fprintf(&versionString, ".%s", c.VersionPrerelease) + fmt.Fprintf(&versionString, "-%s", c.VersionPrerelease) if c.Revision != "" { fmt.Fprintf(&versionString, " (%s)", c.Revision) From 4e4dcac27643b2ccd0124ed36b593bb85880c350 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Sat, 7 Feb 2015 19:03:18 -0500 Subject: [PATCH 31/32] Fix a number of healthcheck bugs --- .../resource_compute_http_health_check.go | 87 +++++++++++-------- ...resource_compute_http_health_check_test.go | 2 +- 2 files changed, 54 insertions(+), 35 deletions(-) diff --git a/builtin/providers/google/resource_compute_http_health_check.go b/builtin/providers/google/resource_compute_http_health_check.go index f4887641a..ca1f4eadd 100644 --- a/builtin/providers/google/resource_compute_http_health_check.go +++ b/builtin/providers/google/resource_compute_http_health_check.go @@ -21,25 +21,23 @@ func resourceComputeHttpHealthCheck() *schema.Resource { "check_interval_sec": &schema.Schema{ Type: schema.TypeInt, Optional: true, - ForceNew: false, + Computed: true, }, "description": &schema.Schema{ Type: schema.TypeString, Optional: true, - ForceNew: false, }, "healthy_threshold": &schema.Schema{ Type: schema.TypeInt, Optional: true, - ForceNew: false, + 
Computed: true, }, "host": &schema.Schema{ Type: schema.TypeString, Optional: true, - ForceNew: false, }, "name": &schema.Schema{ @@ -51,13 +49,13 @@ func resourceComputeHttpHealthCheck() *schema.Resource { "port": &schema.Schema{ Type: schema.TypeInt, Optional: true, - ForceNew: false, + Computed: true, }, "request_path": &schema.Schema{ Type: schema.TypeString, Optional: true, - ForceNew: false, + Computed: true, }, "self_link": &schema.Schema{ @@ -68,13 +66,13 @@ func resourceComputeHttpHealthCheck() *schema.Resource { "timeout_sec": &schema.Schema{ Type: schema.TypeInt, Optional: true, - ForceNew: false, + Computed: true, }, "unhealthy_threshold": &schema.Schema{ Type: schema.TypeInt, Optional: true, - ForceNew: false, + Computed: true, }, }, } @@ -85,25 +83,32 @@ func resourceComputeHttpHealthCheckCreate(d *schema.ResourceData, meta interface // Build the parameter hchk := &compute.HttpHealthCheck{ - Description: d.Get("description").(string), - Host: d.Get("host").(string), Name: d.Get("name").(string), - RequestPath: d.Get("request_path").(string), } - if d.Get("check_interval_sec") != nil { - hchk.CheckIntervalSec = int64(d.Get("check_interval_sec").(int)) + // Optional things + if v, ok := d.GetOk("description"); ok { + hchk.Description = v.(string) } - if d.Get("health_threshold") != nil { - hchk.HealthyThreshold = int64(d.Get("healthy_threshold").(int)) + if v, ok := d.GetOk("host"); ok { + hchk.Host = v.(string) } - if d.Get("port") != nil { - hchk.Port = int64(d.Get("port").(int)) + if v, ok := d.GetOk("request_path"); ok { + hchk.RequestPath = v.(string) } - if d.Get("timeout") != nil { - hchk.TimeoutSec = int64(d.Get("timeout_sec").(int)) + if v, ok := d.GetOk("check_interval_sec"); ok { + hchk.CheckIntervalSec = int64(v.(int)) } - if d.Get("unhealthy_threshold") != nil { - hchk.UnhealthyThreshold = int64(d.Get("unhealthy_threshold").(int)) + if v, ok := d.GetOk("health_threshold"); ok { + hchk.HealthyThreshold = int64(v.(int)) + } + if v, ok := 
d.GetOk("port"); ok { + hchk.Port = int64(v.(int)) + } + if v, ok := d.GetOk("timeout_sec"); ok { + hchk.TimeoutSec = int64(v.(int)) + } + if v, ok := d.GetOk("unhealthy_threshold"); ok { + hchk.UnhealthyThreshold = int64(v.(int)) } log.Printf("[DEBUG] HttpHealthCheck insert request: %#v", hchk) @@ -147,25 +152,32 @@ func resourceComputeHttpHealthCheckUpdate(d *schema.ResourceData, meta interface // Build the parameter hchk := &compute.HttpHealthCheck{ - Description: d.Get("description").(string), - Host: d.Get("host").(string), Name: d.Get("name").(string), - RequestPath: d.Get("request_path").(string), } - if d.Get("check_interval_sec") != nil { - hchk.CheckIntervalSec = int64(d.Get("check_interval_sec").(int)) + // Optional things + if v, ok := d.GetOk("description"); ok { + hchk.Description = v.(string) } - if d.Get("health_threshold") != nil { - hchk.HealthyThreshold = int64(d.Get("healthy_threshold").(int)) + if v, ok := d.GetOk("host"); ok { + hchk.Host = v.(string) } - if d.Get("port") != nil { - hchk.Port = int64(d.Get("port").(int)) + if v, ok := d.GetOk("request_path"); ok { + hchk.RequestPath = v.(string) } - if d.Get("timeout") != nil { - hchk.TimeoutSec = int64(d.Get("timeout_sec").(int)) + if v, ok := d.GetOk("check_interval_sec"); ok { + hchk.CheckIntervalSec = int64(v.(int)) } - if d.Get("unhealthy_threshold") != nil { - hchk.UnhealthyThreshold = int64(d.Get("unhealthy_threshold").(int)) + if v, ok := d.GetOk("health_threshold"); ok { + hchk.HealthyThreshold = int64(v.(int)) + } + if v, ok := d.GetOk("port"); ok { + hchk.Port = int64(v.(int)) + } + if v, ok := d.GetOk("timeout_sec"); ok { + hchk.TimeoutSec = int64(v.(int)) + } + if v, ok := d.GetOk("unhealthy_threshold"); ok { + hchk.UnhealthyThreshold = int64(v.(int)) } log.Printf("[DEBUG] HttpHealthCheck patch request: %#v", hchk) @@ -220,6 +232,13 @@ func resourceComputeHttpHealthCheckRead(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error reading HttpHealthCheck: %s", err) } + 
d.Set("host", hchk.Host) + d.Set("request_path", hchk.RequestPath) + d.Set("check_interval_sec", hchk.CheckIntervalSec) + d.Set("health_threshold", hchk.HealthyThreshold) + d.Set("port", hchk.Port) + d.Set("timeout_sec", hchk.TimeoutSec) + d.Set("unhealthy_threshold", hchk.UnhealthyThreshold) d.Set("self_link", hchk.SelfLink) return nil diff --git a/builtin/providers/google/resource_compute_http_health_check_test.go b/builtin/providers/google/resource_compute_http_health_check_test.go index 45181a4cd..1797e9831 100644 --- a/builtin/providers/google/resource_compute_http_health_check_test.go +++ b/builtin/providers/google/resource_compute_http_health_check_test.go @@ -72,7 +72,7 @@ func testAccCheckComputeHttpHealthCheckExists(n string) resource.TestCheckFunc { const testAccComputeHttpHealthCheck_basic = ` resource "google_compute_http_health_check" "foobar" { - check_interval_sec = 1 + check_interval_sec = 3 description = "Resource created for Terraform acceptance testing" healthy_threshold = 3 host = "foobar" From fbe99605e6ebd38345c2aa147b3ec92600fc9f25 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Sat, 7 Feb 2015 19:05:19 -0500 Subject: [PATCH 32/32] Fix whitespace --- .../google/resource_compute_http_health_check.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/builtin/providers/google/resource_compute_http_health_check.go b/builtin/providers/google/resource_compute_http_health_check.go index ca1f4eadd..68a4c1348 100644 --- a/builtin/providers/google/resource_compute_http_health_check.go +++ b/builtin/providers/google/resource_compute_http_health_check.go @@ -232,13 +232,13 @@ func resourceComputeHttpHealthCheckRead(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error reading HttpHealthCheck: %s", err) } - d.Set("host", hchk.Host) - d.Set("request_path", hchk.RequestPath) - d.Set("check_interval_sec", hchk.CheckIntervalSec) - d.Set("health_threshold", hchk.HealthyThreshold) - d.Set("port", hchk.Port) - 
d.Set("timeout_sec", hchk.TimeoutSec) - d.Set("unhealthy_threshold", hchk.UnhealthyThreshold) + d.Set("host", hchk.Host) + d.Set("request_path", hchk.RequestPath) + d.Set("check_interval_sec", hchk.CheckIntervalSec) + d.Set("health_threshold", hchk.HealthyThreshold) + d.Set("port", hchk.Port) + d.Set("timeout_sec", hchk.TimeoutSec) + d.Set("unhealthy_threshold", hchk.UnhealthyThreshold) d.Set("self_link", hchk.SelfLink) return nil