Use new autoscaler / instance group manager APIs.

This commit is contained in:
Dave Cunningham 2015-07-27 20:47:10 -04:00
parent c6f0bf479b
commit 1ec247ef37
11 changed files with 136 additions and 285 deletions

View File

@ -14,11 +14,9 @@ import (
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"golang.org/x/oauth2/jwt"
"google.golang.org/api/autoscaler/v1beta2"
"google.golang.org/api/compute/v1"
"google.golang.org/api/container/v1"
"google.golang.org/api/dns/v1"
"google.golang.org/api/replicapool/v1beta2"
"google.golang.org/api/storage/v1"
)
@ -29,11 +27,9 @@ type Config struct {
Project string
Region string
clientAutoscaler *autoscaler.Service
clientCompute *compute.Service
clientContainer *container.Service
clientDns *dns.Service
clientReplicaPool *replicapool.Service
clientStorage *storage.Service
}
@ -132,20 +128,6 @@ func (c *Config) loadAndValidate() error {
}
c.clientDns.UserAgent = userAgent
log.Printf("[INFO] Instantiating Google Replica Pool client...")
c.clientReplicaPool, err = replicapool.New(client)
if err != nil {
return err
}
c.clientReplicaPool.UserAgent = userAgent
log.Printf("[INFO] Instantiating Google Autoscaler client...")
c.clientAutoscaler, err = autoscaler.New(client)
if err != nil {
return err
}
c.clientAutoscaler.UserAgent = userAgent
log.Printf("[INFO] Instantiating Google Storage Client...")
c.clientStorage, err = storage.New(client)
if err != nil {

View File

@ -4,9 +4,7 @@ import (
"bytes"
"fmt"
"google.golang.org/api/autoscaler/v1beta2"
"google.golang.org/api/compute/v1"
"google.golang.org/api/replicapool/v1beta2"
"github.com/hashicorp/terraform/helper/resource"
)
@ -26,8 +24,8 @@ type OperationWaiter struct {
Op *compute.Operation
Project string
Region string
Zone string
Type OperationWaitType
Zone string
}
func (w *OperationWaiter) RefreshFunc() resource.StateRefreshFunc {
@ -80,95 +78,3 @@ func (e OperationError) Error() string {
return buf.String()
}
// Replicapool Operations
type ReplicaPoolOperationWaiter struct {
Service *replicapool.Service
Op *replicapool.Operation
Project string
Region string
Zone string
}
func (w *ReplicaPoolOperationWaiter) RefreshFunc() resource.StateRefreshFunc {
return func() (interface{}, string, error) {
var op *replicapool.Operation
var err error
op, err = w.Service.ZoneOperations.Get(
w.Project, w.Zone, w.Op.Name).Do()
if err != nil {
return nil, "", err
}
return op, op.Status, nil
}
}
func (w *ReplicaPoolOperationWaiter) Conf() *resource.StateChangeConf {
return &resource.StateChangeConf{
Pending: []string{"PENDING", "RUNNING"},
Target: "DONE",
Refresh: w.RefreshFunc(),
}
}
// ReplicaPoolOperationError wraps replicapool.OperationError and implements the
// error interface so it can be returned.
type ReplicaPoolOperationError replicapool.OperationError
func (e ReplicaPoolOperationError) Error() string {
var buf bytes.Buffer
for _, err := range e.Errors {
buf.WriteString(err.Message + "\n")
}
return buf.String()
}
// Autoscaler Operations
type AutoscalerOperationWaiter struct {
Service *autoscaler.Service
Op *autoscaler.Operation
Project string
Zone string
}
func (w *AutoscalerOperationWaiter) RefreshFunc() resource.StateRefreshFunc {
return func() (interface{}, string, error) {
var op *autoscaler.Operation
var err error
op, err = w.Service.ZoneOperations.Get(
w.Project, w.Zone, w.Op.Name).Do()
if err != nil {
return nil, "", err
}
return op, op.Status, nil
}
}
func (w *AutoscalerOperationWaiter) Conf() *resource.StateChangeConf {
return &resource.StateChangeConf{
Pending: []string{"PENDING", "RUNNING"},
Target: "DONE",
Refresh: w.RefreshFunc(),
}
}
// AutoscalerOperationError wraps autoscaler.OperationError and implements the
// error interface so it can be returned.
type AutoscalerOperationError autoscaler.OperationError
func (e AutoscalerOperationError) Error() string {
var buf bytes.Buffer
for _, err := range e.Errors {
buf.WriteString(err.Message + "\n")
}
return buf.String()
}

View File

@ -29,7 +29,7 @@ func Provider() terraform.ResourceProvider {
},
ResourcesMap: map[string]*schema.Resource{
"google_autoscaler": resourceAutoscaler(),
"google_compute_autoscaler": resourceComputeAutoscaler(),
"google_compute_address": resourceComputeAddress(),
"google_compute_disk": resourceComputeDisk(),
"google_compute_firewall": resourceComputeFirewall(),
@ -43,7 +43,7 @@ func Provider() terraform.ResourceProvider {
"google_container_cluster": resourceContainerCluster(),
"google_dns_managed_zone": resourceDnsManagedZone(),
"google_dns_record_set": resourceDnsRecordSet(),
"google_replicapool_instance_group_manager": resourceReplicaPoolInstanceGroupManager(),
"google_compute_instance_group_manager": resourceComputeInstanceGroupManager(),
"google_storage_bucket": resourceStorageBucket(),
},

View File

@ -6,16 +6,16 @@ import (
"time"
"google.golang.org/api/googleapi"
"google.golang.org/api/autoscaler/v1beta2"
"google.golang.org/api/compute/v1"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAutoscaler() *schema.Resource {
func resourceComputeAutoscaler() *schema.Resource {
return &schema.Resource{
Create: resourceAutoscalerCreate,
Read: resourceAutoscalerRead,
Update: resourceAutoscalerUpdate,
Delete: resourceAutoscalerDelete,
Create: resourceComputeAutoscalerCreate,
Read: resourceComputeAutoscalerRead,
Update: resourceComputeAutoscalerUpdate,
Delete: resourceComputeAutoscalerDelete,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
@ -120,10 +120,10 @@ func resourceAutoscaler() *schema.Resource {
}
}
func buildAutoscaler(d *schema.ResourceData) (*autoscaler.Autoscaler, error) {
func buildAutoscaler(d *schema.ResourceData) (*compute.Autoscaler, error) {
// Build the parameter
scaler := &autoscaler.Autoscaler{
scaler := &compute.Autoscaler{
Name: d.Get("name").(string),
Target: d.Get("target").(string),
}
@ -140,7 +140,7 @@ func buildAutoscaler(d *schema.ResourceData) (*autoscaler.Autoscaler, error) {
prefix := "autoscaling_policy.0."
scaler.AutoscalingPolicy = &autoscaler.AutoscalingPolicy{
scaler.AutoscalingPolicy = &compute.AutoscalingPolicy{
MaxNumReplicas: int64(d.Get(prefix + "max_replicas").(int)),
MinNumReplicas: int64(d.Get(prefix + "min_replicas").(int)),
CoolDownPeriodSec: int64(d.Get(prefix + "cooldown_period").(int)),
@ -156,7 +156,7 @@ func buildAutoscaler(d *schema.ResourceData) (*autoscaler.Autoscaler, error) {
return nil, fmt.Errorf("The autoscaling_policy must have exactly one cpu_utilization, found %d.", cpuUtilCount)
}
policyCounter++
scaler.AutoscalingPolicy.CpuUtilization = &autoscaler.AutoscalingPolicyCpuUtilization{
scaler.AutoscalingPolicy.CpuUtilization = &compute.AutoscalingPolicyCpuUtilization{
UtilizationTarget: d.Get(prefix + "cpu_utilization.0.target").(float64),
}
}
@ -168,7 +168,7 @@ func buildAutoscaler(d *schema.ResourceData) (*autoscaler.Autoscaler, error) {
if metricCount != 1 {
return nil, fmt.Errorf("The autoscaling_policy must have exactly one metric, found %d.", metricCount)
}
scaler.AutoscalingPolicy.CustomMetricUtilizations = []*autoscaler.AutoscalingPolicyCustomMetricUtilization{
scaler.AutoscalingPolicy.CustomMetricUtilizations = []*compute.AutoscalingPolicyCustomMetricUtilization{
{
Metric: d.Get(prefix + "metric.0.name").(string),
UtilizationTarget: d.Get(prefix + "metric.0.target").(float64),
@ -185,7 +185,7 @@ func buildAutoscaler(d *schema.ResourceData) (*autoscaler.Autoscaler, error) {
if lbuCount != 1 {
return nil, fmt.Errorf("The autoscaling_policy must have exactly one load_balancing_utilization, found %d.", lbuCount)
}
scaler.AutoscalingPolicy.LoadBalancingUtilization = &autoscaler.AutoscalingPolicyLoadBalancingUtilization{
scaler.AutoscalingPolicy.LoadBalancingUtilization = &compute.AutoscalingPolicyLoadBalancingUtilization{
UtilizationTarget: d.Get(prefix + "load_balancing_utilization.0.target").(float64),
}
}
@ -198,7 +198,7 @@ func buildAutoscaler(d *schema.ResourceData) (*autoscaler.Autoscaler, error) {
return scaler, nil
}
func resourceAutoscalerCreate(d *schema.ResourceData, meta interface{}) error {
func resourceComputeAutoscalerCreate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
// Get the zone
@ -215,7 +215,7 @@ func resourceAutoscalerCreate(d *schema.ResourceData, meta interface{}) error {
return err
}
op, err := config.clientAutoscaler.Autoscalers.Insert(
op, err := config.clientCompute.Autoscalers.Insert(
config.Project, zone.Name, scaler).Do()
if err != nil {
return fmt.Errorf("Error creating Autoscaler: %s", err)
@ -225,10 +225,11 @@ func resourceAutoscalerCreate(d *schema.ResourceData, meta interface{}) error {
d.SetId(scaler.Name)
// Wait for the operation to complete
w := &AutoscalerOperationWaiter{
Service: config.clientAutoscaler,
w := &OperationWaiter{
Service: config.clientCompute,
Op: op,
Project: config.Project,
Type: OperationWaitZone,
Zone: zone.Name,
}
state := w.Conf()
@ -238,23 +239,23 @@ func resourceAutoscalerCreate(d *schema.ResourceData, meta interface{}) error {
if err != nil {
return fmt.Errorf("Error waiting for Autoscaler to create: %s", err)
}
op = opRaw.(*autoscaler.Operation)
op = opRaw.(*compute.Operation)
if op.Error != nil {
// The resource didn't actually create
d.SetId("")
// Return the error
return AutoscalerOperationError(*op.Error)
return OperationError(*op.Error)
}
return resourceAutoscalerRead(d, meta)
return resourceComputeAutoscalerRead(d, meta)
}
func resourceAutoscalerRead(d *schema.ResourceData, meta interface{}) error {
func resourceComputeAutoscalerRead(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
zone := d.Get("zone").(string)
scaler, err := config.clientAutoscaler.Autoscalers.Get(
scaler, err := config.clientCompute.Autoscalers.Get(
config.Project, zone, d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
@ -272,7 +273,7 @@ func resourceAutoscalerRead(d *schema.ResourceData, meta interface{}) error {
return nil
}
func resourceAutoscalerUpdate(d *schema.ResourceData, meta interface{}) error {
func resourceComputeAutoscalerUpdate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
zone := d.Get("zone").(string)
@ -282,7 +283,7 @@ func resourceAutoscalerUpdate(d *schema.ResourceData, meta interface{}) error {
return err
}
op, err := config.clientAutoscaler.Autoscalers.Patch(
op, err := config.clientCompute.Autoscalers.Patch(
config.Project, zone, d.Id(), scaler).Do()
if err != nil {
return fmt.Errorf("Error updating Autoscaler: %s", err)
@ -292,10 +293,11 @@ func resourceAutoscalerUpdate(d *schema.ResourceData, meta interface{}) error {
d.SetId(scaler.Name)
// Wait for the operation to complete
w := &AutoscalerOperationWaiter{
Service: config.clientAutoscaler,
w := &OperationWaiter{
Service: config.clientCompute,
Op: op,
Project: config.Project,
Type: OperationWaitZone,
Zone: zone,
}
state := w.Conf()
@ -305,30 +307,31 @@ func resourceAutoscalerUpdate(d *schema.ResourceData, meta interface{}) error {
if err != nil {
return fmt.Errorf("Error waiting for Autoscaler to update: %s", err)
}
op = opRaw.(*autoscaler.Operation)
op = opRaw.(*compute.Operation)
if op.Error != nil {
// Return the error
return AutoscalerOperationError(*op.Error)
return OperationError(*op.Error)
}
return resourceAutoscalerRead(d, meta)
return resourceComputeAutoscalerRead(d, meta)
}
func resourceAutoscalerDelete(d *schema.ResourceData, meta interface{}) error {
func resourceComputeAutoscalerDelete(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
zone := d.Get("zone").(string)
op, err := config.clientAutoscaler.Autoscalers.Delete(
op, err := config.clientCompute.Autoscalers.Delete(
config.Project, zone, d.Id()).Do()
if err != nil {
return fmt.Errorf("Error deleting autoscaler: %s", err)
}
// Wait for the operation to complete
w := &AutoscalerOperationWaiter{
Service: config.clientAutoscaler,
w := &OperationWaiter{
Service: config.clientCompute,
Op: op,
Project: config.Project,
Type: OperationWaitZone,
Zone: zone,
}
state := w.Conf()
@ -338,10 +341,10 @@ func resourceAutoscalerDelete(d *schema.ResourceData, meta interface{}) error {
if err != nil {
return fmt.Errorf("Error waiting for Autoscaler to delete: %s", err)
}
op = opRaw.(*autoscaler.Operation)
op = opRaw.(*compute.Operation)
if op.Error != nil {
// Return the error
return AutoscalerOperationError(*op.Error)
return OperationError(*op.Error)
}
d.SetId("")

View File

@ -4,13 +4,13 @@ import (
"fmt"
"testing"
"google.golang.org/api/autoscaler/v1beta2"
"google.golang.org/api/compute/v1"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAutoscaler_basic(t *testing.T) {
var ascaler autoscaler.Autoscaler
var ascaler compute.Autoscaler
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -21,7 +21,7 @@ func TestAccAutoscaler_basic(t *testing.T) {
Config: testAccAutoscaler_basic,
Check: resource.ComposeTestCheckFunc(
testAccCheckAutoscalerExists(
"google_autoscaler.foobar", &ascaler),
"google_compute_autoscaler.foobar", &ascaler),
),
},
},
@ -29,7 +29,7 @@ func TestAccAutoscaler_basic(t *testing.T) {
}
func TestAccAutoscaler_update(t *testing.T) {
var ascaler autoscaler.Autoscaler
var ascaler compute.Autoscaler
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -40,16 +40,16 @@ func TestAccAutoscaler_update(t *testing.T) {
Config: testAccAutoscaler_basic,
Check: resource.ComposeTestCheckFunc(
testAccCheckAutoscalerExists(
"google_autoscaler.foobar", &ascaler),
"google_compute_autoscaler.foobar", &ascaler),
),
},
resource.TestStep{
Config: testAccAutoscaler_update,
Check: resource.ComposeTestCheckFunc(
testAccCheckAutoscalerExists(
"google_autoscaler.foobar", &ascaler),
"google_compute_autoscaler.foobar", &ascaler),
testAccCheckAutoscalerUpdated(
"google_autoscaler.foobar", 10),
"google_compute_autoscaler.foobar", 10),
),
},
},
@ -60,11 +60,11 @@ func testAccCheckAutoscalerDestroy(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
for _, rs := range s.RootModule().Resources {
if rs.Type != "google_autoscaler" {
if rs.Type != "google_compute_autoscaler" {
continue
}
_, err := config.clientAutoscaler.Autoscalers.Get(
_, err := config.clientCompute.Autoscalers.Get(
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
if err == nil {
return fmt.Errorf("Autoscaler still exists")
@ -74,7 +74,7 @@ func testAccCheckAutoscalerDestroy(s *terraform.State) error {
return nil
}
func testAccCheckAutoscalerExists(n string, ascaler *autoscaler.Autoscaler) resource.TestCheckFunc {
func testAccCheckAutoscalerExists(n string, ascaler *compute.Autoscaler) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
@ -87,7 +87,7 @@ func testAccCheckAutoscalerExists(n string, ascaler *autoscaler.Autoscaler) reso
config := testAccProvider.Meta().(*Config)
found, err := config.clientAutoscaler.Autoscalers.Get(
found, err := config.clientCompute.Autoscalers.Get(
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
if err != nil {
return err
@ -116,7 +116,7 @@ func testAccCheckAutoscalerUpdated(n string, max int64) resource.TestCheckFunc {
config := testAccProvider.Meta().(*Config)
ascaler, err := config.clientAutoscaler.Autoscalers.Get(
ascaler, err := config.clientCompute.Autoscalers.Get(
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
if err != nil {
return err
@ -162,7 +162,7 @@ resource "google_compute_target_pool" "foobar" {
session_affinity = "CLIENT_IP_PROTO"
}
resource "google_replicapool_instance_group_manager" "foobar" {
resource "google_compute_instance_group_manager" "foobar" {
description = "Terraform test instance group manager"
name = "terraform-test-groupmanager"
instance_template = "${google_compute_instance_template.foobar.self_link}"
@ -171,11 +171,11 @@ resource "google_replicapool_instance_group_manager" "foobar" {
zone = "us-central1-a"
}
resource "google_autoscaler" "foobar" {
resource "google_compute_autoscaler" "foobar" {
description = "Resource created for Terraform acceptance testing"
name = "terraform-test-ascaler"
zone = "us-central1-a"
target = "${google_replicapool_instance_group_manager.foobar.self_link}"
target = "${google_compute_instance_group_manager.foobar.self_link}"
autoscaling_policy = {
max_replicas = 5
min_replicas = 0
@ -219,7 +219,7 @@ resource "google_compute_target_pool" "foobar" {
session_affinity = "CLIENT_IP_PROTO"
}
resource "google_replicapool_instance_group_manager" "foobar" {
resource "google_compute_instance_group_manager" "foobar" {
description = "Terraform test instance group manager"
name = "terraform-test-groupmanager"
instance_template = "${google_compute_instance_template.foobar.self_link}"
@ -228,11 +228,11 @@ resource "google_replicapool_instance_group_manager" "foobar" {
zone = "us-central1-a"
}
resource "google_autoscaler" "foobar" {
resource "google_compute_autoscaler" "foobar" {
description = "Resource created for Terraform acceptance testing"
name = "terraform-test-ascaler"
zone = "us-central1-a"
target = "${google_replicapool_instance_group_manager.foobar.self_link}"
target = "${google_compute_instance_group_manager.foobar.self_link}"
autoscaling_policy = {
max_replicas = 10
min_replicas = 0

View File

@ -6,18 +6,18 @@ import (
"time"
"google.golang.org/api/googleapi"
"google.golang.org/api/replicapool/v1beta2"
"google.golang.org/api/compute/v1"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceReplicaPoolInstanceGroupManager() *schema.Resource {
func resourceComputeInstanceGroupManager() *schema.Resource {
return &schema.Resource{
Create: resourceReplicaPoolInstanceGroupManagerCreate,
Read: resourceReplicaPoolInstanceGroupManagerRead,
Update: resourceReplicaPoolInstanceGroupManagerUpdate,
Delete: resourceReplicaPoolInstanceGroupManagerDelete,
Create: resourceComputeInstanceGroupManagerCreate,
Read: resourceComputeInstanceGroupManagerRead,
Update: resourceComputeInstanceGroupManagerUpdate,
Delete: resourceComputeInstanceGroupManagerDelete,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
@ -38,17 +38,12 @@ func resourceReplicaPoolInstanceGroupManager() *schema.Resource {
ForceNew: true,
},
"current_size": &schema.Schema{
Type: schema.TypeInt,
Computed: true,
},
"fingerprint": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"group": &schema.Schema{
"instance_group": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
@ -87,14 +82,15 @@ func resourceReplicaPoolInstanceGroupManager() *schema.Resource {
}
}
func waitOpZone(config *Config, op *replicapool.Operation, zone string,
resource string, action string) (*replicapool.Operation, error) {
func waitOpZone(config *Config, op *compute.Operation, zone string,
resource string, action string) (*compute.Operation, error) {
w := &ReplicaPoolOperationWaiter{
Service: config.clientReplicaPool,
w := &OperationWaiter{
Service: config.clientCompute,
Op: op,
Project: config.Project,
Zone: zone,
Type: OperationWaitZone,
}
state := w.Conf()
state.Timeout = 2 * time.Minute
@ -103,10 +99,10 @@ func waitOpZone(config *Config, op *replicapool.Operation, zone string,
if err != nil {
return nil, fmt.Errorf("Error waiting for %s to %s: %s", resource, action, err)
}
return opRaw.(*replicapool.Operation), nil
return opRaw.(*compute.Operation), nil
}
func resourceReplicaPoolInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error {
func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
// Get group size, default to 1 if not given
@ -116,10 +112,11 @@ func resourceReplicaPoolInstanceGroupManagerCreate(d *schema.ResourceData, meta
}
// Build the parameter
manager := &replicapool.InstanceGroupManager{
manager := &compute.InstanceGroupManager{
Name: d.Get("name").(string),
BaseInstanceName: d.Get("base_instance_name").(string),
InstanceTemplate: d.Get("instance_template").(string),
TargetSize: target_size,
}
// Set optional fields
@ -136,8 +133,8 @@ func resourceReplicaPoolInstanceGroupManagerCreate(d *schema.ResourceData, meta
}
log.Printf("[DEBUG] InstanceGroupManager insert request: %#v", manager)
op, err := config.clientReplicaPool.InstanceGroupManagers.Insert(
config.Project, d.Get("zone").(string), target_size, manager).Do()
op, err := config.clientCompute.InstanceGroupManagers.Insert(
config.Project, d.Get("zone").(string), manager).Do()
if err != nil {
return fmt.Errorf("Error creating InstanceGroupManager: %s", err)
}
@ -154,16 +151,16 @@ func resourceReplicaPoolInstanceGroupManagerCreate(d *schema.ResourceData, meta
// The resource didn't actually create
d.SetId("")
// Return the error
return ReplicaPoolOperationError(*op.Error)
return OperationError(*op.Error)
}
return resourceReplicaPoolInstanceGroupManagerRead(d, meta)
return resourceComputeInstanceGroupManagerRead(d, meta)
}
func resourceReplicaPoolInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error {
func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
manager, err := config.clientReplicaPool.InstanceGroupManagers.Get(
manager, err := config.clientCompute.InstanceGroupManagers.Get(
config.Project, d.Get("zone").(string), d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
@ -177,15 +174,14 @@ func resourceReplicaPoolInstanceGroupManagerRead(d *schema.ResourceData, meta in
}
// Set computed fields
d.Set("current_size", manager.CurrentSize)
d.Set("fingerprint", manager.Fingerprint)
d.Set("group", manager.Group)
d.Set("instance_group", manager.InstanceGroup)
d.Set("target_size", manager.TargetSize)
d.Set("self_link", manager.SelfLink)
return nil
}
func resourceReplicaPoolInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error {
func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
d.Partial(true)
@ -200,12 +196,12 @@ func resourceReplicaPoolInstanceGroupManagerUpdate(d *schema.ResourceData, meta
}
// Build the parameter
setTargetPools := &replicapool.InstanceGroupManagersSetTargetPoolsRequest{
setTargetPools := &compute.InstanceGroupManagersSetTargetPoolsRequest{
Fingerprint: d.Get("fingerprint").(string),
TargetPools: targetPools,
}
op, err := config.clientReplicaPool.InstanceGroupManagers.SetTargetPools(
op, err := config.clientCompute.InstanceGroupManagers.SetTargetPools(
config.Project, d.Get("zone").(string), d.Id(), setTargetPools).Do()
if err != nil {
return fmt.Errorf("Error updating InstanceGroupManager: %s", err)
@ -217,7 +213,7 @@ func resourceReplicaPoolInstanceGroupManagerUpdate(d *schema.ResourceData, meta
return err
}
if op.Error != nil {
return ReplicaPoolOperationError(*op.Error)
return OperationError(*op.Error)
}
d.SetPartial("target_pools")
@ -226,11 +222,11 @@ func resourceReplicaPoolInstanceGroupManagerUpdate(d *schema.ResourceData, meta
// If instance_template changes then update
if d.HasChange("instance_template") {
// Build the parameter
setInstanceTemplate := &replicapool.InstanceGroupManagersSetInstanceTemplateRequest{
setInstanceTemplate := &compute.InstanceGroupManagersSetInstanceTemplateRequest{
InstanceTemplate: d.Get("instance_template").(string),
}
op, err := config.clientReplicaPool.InstanceGroupManagers.SetInstanceTemplate(
op, err := config.clientCompute.InstanceGroupManagers.SetInstanceTemplate(
config.Project, d.Get("zone").(string), d.Id(), setInstanceTemplate).Do()
if err != nil {
return fmt.Errorf("Error updating InstanceGroupManager: %s", err)
@ -242,7 +238,7 @@ func resourceReplicaPoolInstanceGroupManagerUpdate(d *schema.ResourceData, meta
return err
}
if op.Error != nil {
return ReplicaPoolOperationError(*op.Error)
return OperationError(*op.Error)
}
d.SetPartial("instance_template")
@ -254,7 +250,7 @@ func resourceReplicaPoolInstanceGroupManagerUpdate(d *schema.ResourceData, meta
// Only do anything if the new size is set
target_size := int64(v.(int))
op, err := config.clientReplicaPool.InstanceGroupManagers.Resize(
op, err := config.clientCompute.InstanceGroupManagers.Resize(
config.Project, d.Get("zone").(string), d.Id(), target_size).Do()
if err != nil {
return fmt.Errorf("Error updating InstanceGroupManager: %s", err)
@ -266,7 +262,7 @@ func resourceReplicaPoolInstanceGroupManagerUpdate(d *schema.ResourceData, meta
return err
}
if op.Error != nil {
return ReplicaPoolOperationError(*op.Error)
return OperationError(*op.Error)
}
}
@ -275,39 +271,29 @@ func resourceReplicaPoolInstanceGroupManagerUpdate(d *schema.ResourceData, meta
d.Partial(false)
return resourceReplicaPoolInstanceGroupManagerRead(d, meta)
return resourceComputeInstanceGroupManagerRead(d, meta)
}
func resourceReplicaPoolInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error {
func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
zone := d.Get("zone").(string)
op, err := config.clientReplicaPool.InstanceGroupManagers.Delete(config.Project, zone, d.Id()).Do()
op, err := config.clientCompute.InstanceGroupManagers.Delete(config.Project, zone, d.Id()).Do()
if err != nil {
return fmt.Errorf("Error deleting instance group manager: %s", err)
}
// Wait for the operation to complete
w := &ReplicaPoolOperationWaiter{
Service: config.clientReplicaPool,
Op: op,
Project: config.Project,
Zone: d.Get("zone").(string),
}
state := w.Conf()
state.Timeout = 2 * time.Minute
state.MinTimeout = 1 * time.Second
opRaw, err := state.WaitForState()
op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "delete")
if err != nil {
return fmt.Errorf("Error waiting for InstanceGroupManager to delete: %s", err)
return err
}
op = opRaw.(*replicapool.Operation)
if op.Error != nil {
// The resource didn't actually create
d.SetId("")
// Return the error
return ReplicaPoolOperationError(*op.Error)
return OperationError(*op.Error)
}
d.SetId("")

View File

@ -4,14 +4,14 @@ import (
"fmt"
"testing"
"google.golang.org/api/replicapool/v1beta2"
"google.golang.org/api/compute/v1"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccInstanceGroupManager_basic(t *testing.T) {
var manager replicapool.InstanceGroupManager
var manager compute.InstanceGroupManager
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -22,7 +22,7 @@ func TestAccInstanceGroupManager_basic(t *testing.T) {
Config: testAccInstanceGroupManager_basic,
Check: resource.ComposeTestCheckFunc(
testAccCheckInstanceGroupManagerExists(
"google_replicapool_instance_group_manager.foobar", &manager),
"google_compute_instance_group_manager.foobar", &manager),
),
},
},
@ -30,7 +30,7 @@ func TestAccInstanceGroupManager_basic(t *testing.T) {
}
func TestAccInstanceGroupManager_update(t *testing.T) {
var manager replicapool.InstanceGroupManager
var manager compute.InstanceGroupManager
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -41,23 +41,23 @@ func TestAccInstanceGroupManager_update(t *testing.T) {
Config: testAccInstanceGroupManager_basic,
Check: resource.ComposeTestCheckFunc(
testAccCheckInstanceGroupManagerExists(
"google_replicapool_instance_group_manager.foobar", &manager),
"google_compute_instance_group_manager.foobar", &manager),
),
},
resource.TestStep{
Config: testAccInstanceGroupManager_update,
Check: resource.ComposeTestCheckFunc(
testAccCheckInstanceGroupManagerExists(
"google_replicapool_instance_group_manager.foobar", &manager),
"google_compute_instance_group_manager.foobar", &manager),
),
},
resource.TestStep{
Config: testAccInstanceGroupManager_update2,
Check: resource.ComposeTestCheckFunc(
testAccCheckInstanceGroupManagerExists(
"google_replicapool_instance_group_manager.foobar", &manager),
"google_compute_instance_group_manager.foobar", &manager),
testAccCheckInstanceGroupManagerUpdated(
"google_replicapool_instance_group_manager.foobar", 3,
"google_compute_instance_group_manager.foobar", 3,
"google_compute_target_pool.foobaz", "terraform-test-foobaz"),
),
},
@ -69,10 +69,10 @@ func testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
for _, rs := range s.RootModule().Resources {
if rs.Type != "google_replicapool_instance_group_manager" {
if rs.Type != "google_compute_instance_group_manager" {
continue
}
_, err := config.clientReplicaPool.InstanceGroupManagers.Get(
_, err := config.clientCompute.InstanceGroupManagers.Get(
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
if err != nil {
return fmt.Errorf("InstanceGroupManager still exists")
@ -82,7 +82,7 @@ func testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error {
return nil
}
func testAccCheckInstanceGroupManagerExists(n string, manager *replicapool.InstanceGroupManager) resource.TestCheckFunc {
func testAccCheckInstanceGroupManagerExists(n string, manager *compute.InstanceGroupManager) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
@ -95,7 +95,7 @@ func testAccCheckInstanceGroupManagerExists(n string, manager *replicapool.Insta
config := testAccProvider.Meta().(*Config)
found, err := config.clientReplicaPool.InstanceGroupManagers.Get(
found, err := config.clientCompute.InstanceGroupManagers.Get(
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
if err != nil {
return err
@ -124,38 +124,18 @@ func testAccCheckInstanceGroupManagerUpdated(n string, size int64, targetPool st
config := testAccProvider.Meta().(*Config)
manager, err := config.clientReplicaPool.InstanceGroupManagers.Get(
manager, err := config.clientCompute.InstanceGroupManagers.Get(
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
if err != nil {
return err
}
// check that total instance count is "size"
if manager.CurrentSize != size {
// Cannot check the target pool as the instance creation is asynchronous. However, can
// check the target_size.
if manager.TargetSize != size {
return fmt.Errorf("instance count incorrect")
}
// check that at least one instance exists in "targetpool"
tp, ok := s.RootModule().Resources[targetPool]
if !ok {
return fmt.Errorf("Not found: %s", targetPool)
}
if tp.Primary.ID == "" {
return fmt.Errorf("No ID is set")
}
targetpool, err := config.clientCompute.TargetPools.Get(
config.Project, config.Region, tp.Primary.ID).Do()
if err != nil {
return err
}
// check that total instance count is "size"
if len(targetpool.Instances) == 0 {
return fmt.Errorf("no instance in new targetpool")
}
// check that the instance template updated
instanceTemplate, err := config.clientCompute.InstanceTemplates.Get(
config.Project, template).Do()
@ -203,13 +183,13 @@ resource "google_compute_target_pool" "foobar" {
session_affinity = "CLIENT_IP_PROTO"
}
resource "google_replicapool_instance_group_manager" "foobar" {
resource "google_compute_instance_group_manager" "foobar" {
description = "Terraform test instance group manager"
name = "terraform-test"
instance_template = "${google_compute_instance_template.foobar.self_link}"
target_pools = ["${google_compute_target_pool.foobar.self_link}"]
base_instance_name = "foobar"
zone = "us-central1-a"
zone = "us-central1-c"
target_size = 2
}`
@ -276,13 +256,13 @@ resource "google_compute_target_pool" "foobaz" {
session_affinity = "CLIENT_IP_PROTO"
}
resource "google_replicapool_instance_group_manager" "foobar" {
resource "google_compute_instance_group_manager" "foobar" {
description = "Terraform test instance group manager"
name = "terraform-test"
instance_template = "${google_compute_instance_template.foobar.self_link}"
target_pools = ["${google_compute_target_pool.foobaz.self_link}"]
base_instance_name = "foobar"
zone = "us-central1-a"
zone = "us-central1-c"
target_size = 2
}`
@ -349,12 +329,12 @@ resource "google_compute_target_pool" "foobaz" {
session_affinity = "CLIENT_IP_PROTO"
}
resource "google_replicapool_instance_group_manager" "foobar" {
resource "google_compute_instance_group_manager" "foobar" {
description = "Terraform test instance group manager"
name = "terraform-test"
instance_template = "${google_compute_instance_template.foobaz.self_link}"
target_pools = ["${google_compute_target_pool.foobaz.self_link}"]
base_instance_name = "foobar"
zone = "us-central1-a"
zone = "us-central1-c"
target_size = 3
}`

View File

@ -24,7 +24,7 @@ func TestAccComputeInstanceTemplate_basic(t *testing.T) {
"google_compute_instance_template.foobar", &instanceTemplate),
testAccCheckComputeInstanceTemplateTag(&instanceTemplate, "foo"),
testAccCheckComputeInstanceTemplateMetadata(&instanceTemplate, "foo", "bar"),
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "debian-7-wheezy-v20140814", true, true),
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140814", true, true),
),
},
},
@ -64,7 +64,7 @@ func TestAccComputeInstanceTemplate_disks(t *testing.T) {
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceTemplateExists(
"google_compute_instance_template.foobar", &instanceTemplate),
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "debian-7-wheezy-v20140814", true, true),
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140814", true, true),
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "terraform-test-foobar", false, false),
),
},

View File

@ -1,15 +1,12 @@
---
layout: "google"
page_title: "Google: google_autoscaler"
sidebar_current: "docs-google-resource-autoscaler"
page_title: "Google: google_compute_autoscaler"
sidebar_current: "docs-google-resource-compute-autoscaler"
description: |-
Managers an Instance Group within GCE.
Manages an Autoscaler within GCE.
---
# google\_autoscaler
**Note**: This resource is in
[beta](https://cloud.google.com/terms/launch-stages).
# google\_compute\_autoscaler
A Compute Engine Autoscaler automatically adds or removes virtual machines from
a managed instance group based on increases or decreases in load. This allows
@ -51,7 +48,7 @@ resource "google_compute_target_pool" "foobar" {
name = "foobar"
}
resource "google_replicapool_instance_group_manager" "foobar" {
resource "google_compute_instance_group_manager" "foobar" {
name = "foobar"
instance_template = "${google_compute_instance_template.foobar.self_link}"
target_pools = ["${google_compute_target_pool.foobar.self_link}"]
@ -59,10 +56,10 @@ resource "google_replicapool_instance_group_manager" "foobar" {
zone = "us-central1-f"
}
resource "google_autoscaler" "foobar" {
resource "google_compute_autoscaler" "foobar" {
name = "foobar"
zone = "us-central1-f"
target = "${google_replicapool_instance_group_manager.foobar.self_link}"
target = "${google_compute_instance_group_manager.foobar.self_link}"
autoscaling_policy = {
max_replicas = 5
min_replicas = 1

View File

@ -1,15 +1,12 @@
---
layout: "google"
page_title: "Google: google_replicapool_instance_group_manager"
sidebar_current: "docs-google-resource-instance_group_manager"
page_title: "Google: google_compute_instance_group_manager"
sidebar_current: "docs-google-resource-compute-instance_group_manager"
description: |-
Manages an Instance Group within GCE.
---
# google\_replicapool\_instance\_group\_manager
**Note**: This resource is in
[beta](https://cloud.google.com/terms/launch-stages).
# google\_compute\_instance\_group\_manager
The Google Compute Engine Instance Group Manager API creates and manages pools
of homogeneous Compute Engine virtual machine instances from a common instance
@ -19,7 +16,7 @@ and [API](https://cloud.google.com/compute/docs/instance-groups/manager/v1beta2/
## Example Usage
```
resource "google_replicapool_instance_group_manager" "foobar" {
resource "google_compute_instance_group_manager" "foobar" {
description = "Terraform test instance group manager"
name = "terraform-test"
instance_template = "${google_compute_instance_template.foobar.self_link}"
@ -63,6 +60,6 @@ affect existing instances.
The following attributes are exported:
* `group` - The full URL of the instance group created by the manager.
* `instance_group` - The full URL of the instance group created by the manager.
* `self_link` - The URL of the created resource.

View File

@ -65,12 +65,12 @@
<a href="/docs/providers/google/r/dns_record_set.html">google_dns_record_set</a>
</li>
<li<%= sidebar_current("docs-google-resource-replicapool-instance-group-manager") %>>
<a href="/docs/providers/google/r/replicapool_instance_group_manager.html">google_replicapool_instance_group_manager</a>
          <li<%= sidebar_current("docs-google-resource-compute-instance_group_manager") %>>
<a href="/docs/providers/google/r/compute_instance_group_manager.html">google_compute_instance_group_manager</a>
</li>
<li<%= sidebar_current("docs-google-resource-autoscaler") %>>
<a href="/docs/providers/google/r/autoscaler.html">google_autoscaler</a>
<li<%= sidebar_current("docs-google-resource-compute-autoscaler") %>>
<a href="/docs/providers/google/r/compute_autoscaler.html">google_compute_autoscaler</a>
</li>
<li<%= sidebar_current("docs-google-resource-storage-bucket") %>>