diff --git a/builtin/providers/google/container_operation.go b/builtin/providers/google/container_operation.go
new file mode 100644
index 000000000..fb1b9cab8
--- /dev/null
+++ b/builtin/providers/google/container_operation.go
@@ -0,0 +1,59 @@
+package google
+
+import (
+    "fmt"
+    "log"
+    "time"
+
+    "github.com/hashicorp/terraform/helper/resource"
+    "google.golang.org/api/container/v1"
+)
+
+type ContainerOperationWaiter struct {
+    Service *container.Service
+    Op      *container.Operation
+    Project string
+    Zone    string
+}
+
+func (w *ContainerOperationWaiter) Conf() *resource.StateChangeConf {
+    return &resource.StateChangeConf{
+        Pending: []string{"PENDING", "RUNNING"},
+        Target:  []string{"DONE"},
+        Refresh: w.RefreshFunc(),
+    }
+}
+
+func (w *ContainerOperationWaiter) RefreshFunc() resource.StateRefreshFunc {
+    return func() (interface{}, string, error) {
+        resp, err := w.Service.Projects.Zones.Operations.Get(
+            w.Project, w.Zone, w.Op.Name).Do()
+
+        if err != nil {
+            return nil, "", err
+        }
+
+        log.Printf("[DEBUG] Progress of operation %q: %q", w.Op.Name, resp.Status)
+
+        return resp, resp.Status, err
+    }
+}
+
+func containerOperationWait(config *Config, op *container.Operation, project, zone, activity string, timeoutMinutes, minTimeoutSeconds int) error {
+    w := &ContainerOperationWaiter{
+        Service: config.clientContainer,
+        Op:      op,
+        Project: project,
+        Zone:    zone,
+    }
+
+    state := w.Conf()
+    state.Timeout = time.Duration(timeoutMinutes) * time.Minute
+    state.MinTimeout = time.Duration(minTimeoutSeconds) * time.Second
+    _, err := state.WaitForState()
+    if err != nil {
+        return fmt.Errorf("Error waiting for %s: %s", activity, err)
+    }
+
+    return nil
+}
diff --git a/builtin/providers/google/provider.go b/builtin/providers/google/provider.go
index f4d7d5f7b..7984a1f22 100644
--- a/builtin/providers/google/provider.go
+++ b/builtin/providers/google/provider.go
@@ -91,6 +91,7 @@ func Provider() terraform.ResourceProvider {
             "google_compute_vpn_gateway":  resourceComputeVpnGateway(),
             "google_compute_vpn_tunnel":   resourceComputeVpnTunnel(),
             "google_container_cluster":    resourceContainerCluster(),
+            "google_container_node_pool":  resourceContainerNodePool(),
             "google_dns_managed_zone":     resourceDnsManagedZone(),
             "google_dns_record_set":       resourceDnsRecordSet(),
             "google_sql_database":         resourceSqlDatabase(),
diff --git a/builtin/providers/google/resource_container_cluster.go b/builtin/providers/google/resource_container_cluster.go
index fd9aa43a9..1337e0d92 100644
--- a/builtin/providers/google/resource_container_cluster.go
+++ b/builtin/providers/google/resource_container_cluster.go
@@ -5,9 +5,7 @@ import (
     "log"
     "net"
     "regexp"
-    "time"
 
-    "github.com/hashicorp/terraform/helper/resource"
     "github.com/hashicorp/terraform/helper/schema"
     "google.golang.org/api/container/v1"
     "google.golang.org/api/googleapi"
@@ -389,23 +387,11 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
     }
 
     // Wait until it's created
-    wait := resource.StateChangeConf{
-        Pending:    []string{"PENDING", "RUNNING"},
-        Target:     []string{"DONE"},
-        Timeout:    30 * time.Minute,
-        MinTimeout: 3 * time.Second,
-        Refresh: func() (interface{}, string, error) {
-            resp, err := config.clientContainer.Projects.Zones.Operations.Get(
-                project, zoneName, op.Name).Do()
-            log.Printf("[DEBUG] Progress of creating GKE cluster %s: %s",
-                clusterName, resp.Status)
-            return resp, resp.Status, err
-        },
-    }
-
-    _, err = wait.WaitForState()
-    if err != nil {
-        return err
+    waitErr := containerOperationWait(config, op, project, zoneName, "creating GKE cluster", 30, 3)
+    if waitErr != nil {
+        // The resource didn't actually create
+        d.SetId("")
+        return waitErr
     }
 
     log.Printf("[INFO] GKE cluster %s has been created", clusterName)
@@ -503,24 +489,9 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
     }
 
     // Wait until it's updated
-    wait := resource.StateChangeConf{
-        Pending:    []string{"PENDING", "RUNNING"},
-        Target:     []string{"DONE"},
-        Timeout:    10 * time.Minute,
-        MinTimeout: 2 * time.Second,
-        Refresh: func() (interface{}, string, error) {
-            log.Printf("[DEBUG] Checking if GKE cluster %s is updated", clusterName)
-            resp, err := config.clientContainer.Projects.Zones.Operations.Get(
-                project, zoneName, op.Name).Do()
-            log.Printf("[DEBUG] Progress of updating GKE cluster %s: %s",
-                clusterName, resp.Status)
-            return resp, resp.Status, err
-        },
-    }
-
-    _, err = wait.WaitForState()
-    if err != nil {
-        return err
+    waitErr := containerOperationWait(config, op, project, zoneName, "updating GKE cluster", 10, 2)
+    if waitErr != nil {
+        return waitErr
     }
 
     log.Printf("[INFO] GKE cluster %s has been updated to %s", d.Id(),
@@ -548,24 +519,9 @@ func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) er
     }
 
     // Wait until it's deleted
-    wait := resource.StateChangeConf{
-        Pending:    []string{"PENDING", "RUNNING"},
-        Target:     []string{"DONE"},
-        Timeout:    10 * time.Minute,
-        MinTimeout: 3 * time.Second,
-        Refresh: func() (interface{}, string, error) {
-            log.Printf("[DEBUG] Checking if GKE cluster %s is deleted", clusterName)
-            resp, err := config.clientContainer.Projects.Zones.Operations.Get(
-                project, zoneName, op.Name).Do()
-            log.Printf("[DEBUG] Progress of deleting GKE cluster %s: %s",
-                clusterName, resp.Status)
-            return resp, resp.Status, err
-        },
-    }
-
-    _, err = wait.WaitForState()
-    if err != nil {
-        return err
+    waitErr := containerOperationWait(config, op, project, zoneName, "deleting GKE cluster", 10, 3)
+    if waitErr != nil {
+        return waitErr
     }
 
     log.Printf("[INFO] GKE cluster %s has been deleted", d.Id())
diff --git a/builtin/providers/google/resource_container_node_pool.go b/builtin/providers/google/resource_container_node_pool.go
new file mode 100644
index 000000000..24f2c97a7
--- /dev/null
+++ b/builtin/providers/google/resource_container_node_pool.go
@@ -0,0 +1,191 @@
+package google
+
+import (
+    "fmt"
+    "log"
+
+    "github.com/hashicorp/terraform/helper/resource"
+    "github.com/hashicorp/terraform/helper/schema"
+    "google.golang.org/api/container/v1"
+    "google.golang.org/api/googleapi"
+)
+
+func resourceContainerNodePool() *schema.Resource {
+    return &schema.Resource{
+        Create: resourceContainerNodePoolCreate,
+        Read:   resourceContainerNodePoolRead,
+        Delete: resourceContainerNodePoolDelete,
+        Exists: resourceContainerNodePoolExists,
+
+        Schema: map[string]*schema.Schema{
+            "project": &schema.Schema{
+                Type:     schema.TypeString,
+                Optional: true,
+                ForceNew: true,
+            },
+
+            "name": &schema.Schema{
+                Type:          schema.TypeString,
+                Optional:      true,
+                Computed:      true,
+                ConflictsWith: []string{"name_prefix"},
+                ForceNew:      true,
+            },
+
+            "name_prefix": &schema.Schema{
+                Type:     schema.TypeString,
+                Optional: true,
+                ForceNew: true,
+            },
+
+            "zone": &schema.Schema{
+                Type:     schema.TypeString,
+                Required: true,
+                ForceNew: true,
+            },
+
+            "cluster": &schema.Schema{
+                Type:     schema.TypeString,
+                Required: true,
+                ForceNew: true,
+            },
+
+            "initial_node_count": &schema.Schema{
+                Type:     schema.TypeInt,
+                Required: true,
+                ForceNew: true,
+            },
+        },
+    }
+}
+
+func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) error {
+    config := meta.(*Config)
+
+    project, err := getProject(d, config)
+    if err != nil {
+        return err
+    }
+
+    zone := d.Get("zone").(string)
+    cluster := d.Get("cluster").(string)
+    nodeCount := d.Get("initial_node_count").(int)
+
+    var name string
+    if v, ok := d.GetOk("name"); ok {
+        name = v.(string)
+    } else if v, ok := d.GetOk("name_prefix"); ok {
+        name = resource.PrefixedUniqueId(v.(string))
+    } else {
+        name = resource.UniqueId()
+    }
+
+    nodePool := &container.NodePool{
+        Name:             name,
+        InitialNodeCount: int64(nodeCount),
+    }
+
+    req := &container.CreateNodePoolRequest{
+        NodePool: nodePool,
+    }
+
+    op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Create(project, zone, cluster, req).Do()
+
+    if err != nil {
+        return fmt.Errorf("Error creating NodePool: %s", err)
+    }
+
+    waitErr := containerOperationWait(config, op, project, zone, "creating GKE NodePool", 10, 3)
+    if waitErr != nil {
+        // The resource didn't actually create
+        d.SetId("")
+        return waitErr
+    }
+
+    log.Printf("[INFO] GKE NodePool %s has been created", name)
+
+    d.SetId(name)
+
+    return resourceContainerNodePoolRead(d, meta)
+}
+
+func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) error {
+    config := meta.(*Config)
+
+    project, err := getProject(d, config)
+    if err != nil {
+        return err
+    }
+
+    zone := d.Get("zone").(string)
+    name := d.Get("name").(string)
+    cluster := d.Get("cluster").(string)
+
+    nodePool, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get(
+        project, zone, cluster, name).Do()
+    if err != nil {
+        return fmt.Errorf("Error reading NodePool: %s", err)
+    }
+
+    d.Set("name", nodePool.Name)
+    d.Set("initial_node_count", nodePool.InitialNodeCount)
+
+    return nil
+}
+
+func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) error {
+    config := meta.(*Config)
+
+    project, err := getProject(d, config)
+    if err != nil {
+        return err
+    }
+
+    zone := d.Get("zone").(string)
+    name := d.Get("name").(string)
+    cluster := d.Get("cluster").(string)
+
+    op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Delete(
+        project, zone, cluster, name).Do()
+    if err != nil {
+        return fmt.Errorf("Error deleting NodePool: %s", err)
+    }
+
+    // Wait until it's deleted
+    waitErr := containerOperationWait(config, op, project, zone, "deleting GKE NodePool", 10, 2)
+    if waitErr != nil {
+        return waitErr
+    }
+
+    log.Printf("[INFO] GKE NodePool %s has been deleted", d.Id())
+
+    d.SetId("")
+
+    return nil
+}
+
+func resourceContainerNodePoolExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+    config := meta.(*Config)
+
+    project, err := getProject(d, config)
+    if err != nil {
+        return false, err
+    }
+
+    zone := d.Get("zone").(string)
+    name := d.Get("name").(string)
+    cluster := d.Get("cluster").(string)
+
+    _, err = config.clientContainer.Projects.Zones.Clusters.NodePools.Get(
+        project, zone, cluster, name).Do()
+    if err != nil {
+        if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+            log.Printf("[WARN] Removing Container NodePool %q because it's gone", name)
+            // The resource doesn't exist anymore; return a nil error so it
+            // is removed from state rather than surfacing the 404
+            return false, nil
+        }
+        // There was some other error in reading the resource
+        return true, err
+    }
+    return true, nil
+}
diff --git a/builtin/providers/google/resource_container_node_pool_test.go b/builtin/providers/google/resource_container_node_pool_test.go
new file mode 100644
index 000000000..a6b0da809
--- /dev/null
+++ b/builtin/providers/google/resource_container_node_pool_test.go
@@ -0,0 +1,101 @@
+package google
+
+import (
+    "fmt"
+    "strconv"
+    "testing"
+
+    "github.com/hashicorp/terraform/helper/acctest"
+    "github.com/hashicorp/terraform/helper/resource"
+    "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccContainerNodePool_basic(t *testing.T) {
+    resource.Test(t, resource.TestCase{
+        PreCheck:     func() { testAccPreCheck(t) },
+        Providers:    testAccProviders,
+        CheckDestroy: testAccCheckContainerNodePoolDestroy,
+        Steps: []resource.TestStep{
+            resource.TestStep{
+                Config: testAccContainerNodePool_basic,
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckContainerNodePoolMatches("google_container_node_pool.np"),
+                ),
+            },
+        },
+    })
+}
+
+func testAccCheckContainerNodePoolDestroy(s *terraform.State) error {
+    config := testAccProvider.Meta().(*Config)
+
+    for _, rs := range s.RootModule().Resources {
+        if rs.Type != "google_container_node_pool" {
+            continue
+        }
+
+        attributes := rs.Primary.Attributes
+        _, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get(
+            config.Project, attributes["zone"], attributes["cluster"], attributes["name"]).Do()
+        if err == nil {
+            return fmt.Errorf("NodePool still exists")
+        }
+    }
+
+    return nil
+}
+
+func testAccCheckContainerNodePoolMatches(n string) resource.TestCheckFunc {
+    return func(s *terraform.State) error {
+        config := testAccProvider.Meta().(*Config)
+
+        rs, ok := s.RootModule().Resources[n]
+        if !ok {
+            return fmt.Errorf("Not found: %s", n)
+        }
+
+        if rs.Primary.ID == "" {
+            return fmt.Errorf("No ID is set")
+        }
+
+        attributes := rs.Primary.Attributes
+        found, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get(
+            config.Project, attributes["zone"], attributes["cluster"], attributes["name"]).Do()
+        if err != nil {
+            return err
+        }
+
+        if found.Name != attributes["name"] {
+            return fmt.Errorf("NodePool not found")
+        }
+
+        inc, err := strconv.Atoi(attributes["initial_node_count"])
+        if err != nil {
+            return err
+        }
+        if found.InitialNodeCount != int64(inc) {
+            return fmt.Errorf("Mismatched initialNodeCount. TF State: %s. GCP State: %d",
+                attributes["initial_node_count"], found.InitialNodeCount)
+        }
+        return nil
+    }
+}
+
+var testAccContainerNodePool_basic = fmt.Sprintf(`
+resource "google_container_cluster" "cluster" {
+    name = "tf-cluster-nodepool-test-%s"
+    zone = "us-central1-a"
+    initial_node_count = 3
+
+    master_auth {
+        username = "mr.yoda"
+        password = "adoy.rm"
+    }
+}
+
+resource "google_container_node_pool" "np" {
+    name = "tf-nodepool-test-%s"
+    zone = "us-central1-a"
+    cluster = "${google_container_cluster.cluster.name}"
+    initial_node_count = 2
+}`, acctest.RandString(10), acctest.RandString(10))
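
Usage note: a minimal sketch of the new resource in Terraform configuration, exercising the name_prefix attribute (when name is unset, the create path above derives a unique pool name via resource.PrefixedUniqueId). The cluster and pool names below are illustrative placeholders, not values taken from this change:

    resource "google_container_node_pool" "extra" {
        # name is omitted, so the provider generates one from name_prefix
        name_prefix        = "pool-"
        zone               = "us-central1-a"
        cluster            = "existing-cluster" # an existing GKE cluster in this zone
        initial_node_count = 2
    }

Because every attribute in the schema is marked ForceNew, changing any of these values replaces the node pool rather than updating it in place.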