From c62dc3f72fad1bfb026c6f0487bcfafd96903300 Mon Sep 17 00:00:00 2001
From: Joe Topjian
Date: Sat, 28 May 2016 23:46:33 +0000
Subject: [PATCH] provider/openstack: Disassociate Monitors from Pool Before
 Deletion

This commit ensures that all monitors have been disassociated from the
load balancing pool before the pool is deleted.

A test has been added to ensure that a full load balancing stack is
capable of handling an update to an instance, causing some components
to be rebuilt.
---
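Note (not part of the commit): the delete flow this patch implements, shown
below as a standalone sketch against the vendored rackspace/gophercloud
LBaaS v1 API. The helper name deletePoolWithMonitors and its parameters are
illustrative; pools.DisassociateMonitor and pools.Delete are the real calls
from that package.

    package main

    import (
    	"fmt"

    	"github.com/rackspace/gophercloud"
    	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools"
    )

    // deletePoolWithMonitors is a hypothetical helper mirroring the order of
    // operations in resourceLBPoolV1Delete: detach every monitor, then delete.
    func deletePoolWithMonitors(client *gophercloud.ServiceClient, poolID string, monitorIDs []string) error {
    	// Disassociate every monitor first; the premise of this commit is
    	// that the pool cannot be cleanly deleted while monitors remain
    	// associated with it.
    	for _, mID := range monitorIDs {
    		if res := pools.DisassociateMonitor(client, poolID, mID); res.Err != nil {
    			return fmt.Errorf("Error disassociating monitor %s from pool %s: %s", mID, poolID, res.Err)
    		}
    	}

    	// With no monitors attached, the pool itself can be deleted.
    	if res := pools.Delete(client, poolID); res.Err != nil {
    		return fmt.Errorf("Error deleting pool %s: %s", poolID, res.Err)
    	}
    	return nil
    }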
 .../resource_openstack_lb_pool_v1.go          |  13 +++
 .../resource_openstack_lb_pool_v1_test.go     | 104 +++++++++++++++++-
 2 files changed, 115 insertions(+), 2 deletions(-)

diff --git a/builtin/providers/openstack/resource_openstack_lb_pool_v1.go b/builtin/providers/openstack/resource_openstack_lb_pool_v1.go
index 345d2b166..136b73a42 100644
--- a/builtin/providers/openstack/resource_openstack_lb_pool_v1.go
+++ b/builtin/providers/openstack/resource_openstack_lb_pool_v1.go
@@ -299,6 +299,19 @@ func resourceLBPoolV1Delete(d *schema.ResourceData, meta interface{}) error {
 		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
 	}
 
+	// Make sure all monitors are disassociated first
+	if v, ok := d.GetOk("monitor_ids"); ok {
+		if monitorIDList, ok := v.([]interface{}); ok {
+			for _, monitorID := range monitorIDList {
+				mID := monitorID.(string)
+				log.Printf("[DEBUG] Attempting to disassociate monitor %s from pool %s", mID, d.Id())
+				if res := pools.DisassociateMonitor(networkingClient, d.Id(), mID); res.Err != nil {
+					return fmt.Errorf("Error disassociating monitor %s from pool %s: %s", mID, d.Id(), res.Err)
+				}
+			}
+		}
+	}
+
 	stateConf := &resource.StateChangeConf{
 		Pending:    []string{"ACTIVE", "PENDING_DELETE"},
 		Target:     []string{"DELETED"},
diff --git a/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go b/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go
index 8264a32c9..3608db2e6 100644
--- a/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go
+++ b/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go
@@ -56,7 +56,20 @@ func TestAccLBV1Pool_fullstack(t *testing.T) {
 		CheckDestroy: testAccCheckLBV1PoolDestroy,
 		Steps: []resource.TestStep{
 			resource.TestStep{
-				Config: testAccLBV1Pool_fullstack,
+				Config: testAccLBV1Pool_fullstack_1,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckNetworkingV2NetworkExists(t, "openstack_networking_network_v2.network_1", &network),
+					testAccCheckNetworkingV2SubnetExists(t, "openstack_networking_subnet_v2.subnet_1", &subnet),
+					testAccCheckComputeV2SecGroupExists(t, "openstack_compute_secgroup_v2.secgroup_1", &secgroup),
+					testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.instance_1", &instance1),
+					testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.instance_2", &instance2),
+					testAccCheckLBV1PoolExists(t, "openstack_lb_pool_v1.pool_1", &pool),
+					testAccCheckLBV1MonitorExists(t, "openstack_lb_monitor_v1.monitor_1", &monitor),
+					testAccCheckLBV1VIPExists(t, "openstack_lb_vip_v1.vip_1", &vip),
+				),
+			},
+			resource.TestStep{
+				Config: testAccLBV1Pool_fullstack_2,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckNetworkingV2NetworkExists(t, "openstack_networking_network_v2.network_1", &network),
 					testAccCheckNetworkingV2SubnetExists(t, "openstack_networking_subnet_v2.subnet_1", &subnet),
@@ -172,7 +185,7 @@ var testAccLBV1Pool_update = fmt.Sprintf(`
 }`, OS_REGION_NAME, OS_REGION_NAME, OS_REGION_NAME)
 
-var testAccLBV1Pool_fullstack = fmt.Sprintf(`
+var testAccLBV1Pool_fullstack_1 = fmt.Sprintf(`
 resource "openstack_networking_network_v2" "network_1" {
   name = "network_1"
   admin_state_up = "true"
@@ -257,3 +270,90 @@ var testAccLBV1Pool_fullstack = fmt.Sprintf(`
   pool_id = "${openstack_lb_pool_v1.pool_1.id}"
   admin_state_up = true
 }`)
+
+var testAccLBV1Pool_fullstack_2 = fmt.Sprintf(`
+resource "openstack_networking_network_v2" "network_1" {
+  name = "network_1"
+  admin_state_up = "true"
+}
+
+resource "openstack_networking_subnet_v2" "subnet_1" {
+  network_id = "${openstack_networking_network_v2.network_1.id}"
+  cidr = "192.168.199.0/24"
+  ip_version = 4
+}
+
+resource "openstack_compute_secgroup_v2" "secgroup_1" {
+  name = "secgroup_1"
+  description = "Rules for secgroup_1"
+
+  rule {
+    from_port = -1
+    to_port = -1
+    ip_protocol = "icmp"
+    cidr = "0.0.0.0/0"
+  }
+
+  rule {
+    from_port = 80
+    to_port = 80
+    ip_protocol = "tcp"
+    cidr = "0.0.0.0/0"
+  }
+}
+
+resource "openstack_compute_instance_v2" "instance_1" {
+  name = "instance_1"
+  security_groups = ["default", "${openstack_compute_secgroup_v2.secgroup_1.name}"]
+  network {
+    uuid = "${openstack_networking_network_v2.network_1.id}"
+  }
+}
+
+resource "openstack_compute_instance_v2" "instance_2" {
+  name = "instance_2"
+  security_groups = ["default", "${openstack_compute_secgroup_v2.secgroup_1.name}"]
+  user_data = "#cloud-config\ndisable_root: false"
+  network {
+    uuid = "${openstack_networking_network_v2.network_1.id}"
+  }
+}
+
+resource "openstack_lb_monitor_v1" "monitor_1" {
+  type = "TCP"
+  delay = 30
+  timeout = 5
+  max_retries = 3
+  admin_state_up = "true"
+}
+
+resource "openstack_lb_pool_v1" "pool_1" {
+  name = "pool_1"
+  protocol = "TCP"
+  subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}"
+  lb_method = "ROUND_ROBIN"
+  monitor_ids = ["${openstack_lb_monitor_v1.monitor_1.id}"]
+}
+
+resource "openstack_lb_member_v1" "member_1" {
+  pool_id = "${openstack_lb_pool_v1.pool_1.id}"
+  address = "${openstack_compute_instance_v2.instance_1.access_ip_v4}"
+  port = 80
+  admin_state_up = true
+}
+
+resource "openstack_lb_member_v1" "member_2" {
+  pool_id = "${openstack_lb_pool_v1.pool_1.id}"
+  address = "${openstack_compute_instance_v2.instance_2.access_ip_v4}"
+  port = 80
+  admin_state_up = true
+}
+
+resource "openstack_lb_vip_v1" "vip_1" {
+  name = "vip_1"
+  subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}"
+  protocol = "TCP"
+  port = 80
+  pool_id = "${openstack_lb_pool_v1.pool_1.id}"
+  admin_state_up = true
+}`)
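
Note (not part of the commit): the first hunk's context cuts off inside the
StateChangeConf literal. For orientation, this is roughly how such a wait is
completed with Terraform's helper/resource API; the refresh helper name
refreshLBPoolDelete and the timeout values are illustrative, not the
provider's actual code.

    // The pool is polled until it reaches DELETED. refreshLBPoolDelete is a
    // hypothetical resource.StateRefreshFunc that issues the delete request
    // and then reports the pool's current status on each poll.
    stateConf := &resource.StateChangeConf{
    	Pending:    []string{"ACTIVE", "PENDING_DELETE"},
    	Target:     []string{"DELETED"},
    	Refresh:    refreshLBPoolDelete(networkingClient, d.Id()),
    	Timeout:    2 * time.Minute,
    	MinTimeout: 3 * time.Second,
    }

    // WaitForState blocks until the Target state is reached, the Timeout
    // expires, or the refresh function returns an error.
    if _, err := stateConf.WaitForState(); err != nil {
    	return fmt.Errorf("Error deleting OpenStack LB Pool: %s", err)
    }

    d.SetId("")
    return nil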